diff --git a/AGENTS.md b/AGENTS.md index 2926fb8cf6d534..74c30b95586f0e 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -16,6 +16,36 @@ When adding code, strictly follow existing similar code in similar contexts, inc After adding code, you must first conduct self-review and refactoring attempts to ensure good abstraction and reuse as much as possible. +## Logging Standards + +All log messages in FE (Java) must follow these rules. Code review must check new/modified log statements against these standards. + +### Log Level Guidelines + +- **ERROR**: System/component-level failures requiring human intervention. Must include exception stack traces and context. +- **WARN**: Recoverable abnormal situations that may affect users. Must NOT be used for messages that repeat continuously under normal conditions. +- **INFO**: Key business events, state changes, important operation completions. Must NOT be used for per-second periodic reports or per-request tracing. +- **DEBUG**: Detailed operational/debugging info such as per-RPC calls, per-tablet scheduling, per-file-system operations. + +### Grammar and Style Rules + +1. **Use correct English grammar** — avoid Chinglish patterns: + - ✅ `"Finished checking tablets"` / ❌ `"finished to check tablets"` + - ✅ `"Begin to schedule tablets"` / ❌ `"beginning to tablet scheduler"` + - ✅ `"Start saving image"` / ❌ `"start to save image"` +2. **Capitalize the first letter** of log messages. +3. **Keep acronyms uppercase**: `GC`, `RPC`, `IO`, `SQL`, `JDBC`, `LDAP`, `HDFS`. +4. **Include sufficient context**: Log messages must contain the object identifier (table name, DB name, ID, etc.) to make the log actionable. + - ✅ `"Failed to create table: db={}, table={}, reason={}"` / ❌ `"Failed to create a table."` +5. **Use consistent key=value format**: Use `key=value` with camelCase keys and no spaces around `=`. + +### Output Rules + +1. **No logging on idle cycles**: Periodic daemons must NOT log at INFO when there is zero work. 
Use DEBUG or skip. +2. **Aggregate high-frequency logs**: For operations that fire every second, produce periodic summaries (e.g., every 5 minutes) instead of per-event logs. +3. **Do not serialize full Thrift/Protobuf objects**: Extract and log only key fields (ID, status, names). Single INFO log lines should not exceed 200 characters. +4. **Suppress third-party library noise**: Configure log4j2 to set third-party loggers (Kerberos, Hive MetaStore client, Airlift, Hadoop security) to WARN or OFF. + ## Code Review When conducting code review (including self-review and review tasks), it is necessary to complete the key checkpoints according to our `code-review` skill and provide conclusions for each key checkpoint (if applicable) as part of the final written description. Other content does not require individual responses; just check them during the review process. diff --git a/be/src/agent/task_worker_pool.cpp b/be/src/agent/task_worker_pool.cpp index 80188d8324090f..7c47e08049c1ab 100644 --- a/be/src/agent/task_worker_pool.cpp +++ b/be/src/agent/task_worker_pool.cpp @@ -1045,7 +1045,7 @@ void update_tablet_meta_callback(StorageEngine& engine, const TAgentTaskRequest& } } - LOG(INFO) << "finish update tablet meta task. signature=" << req.signature; + LOG(INFO) << "Finished updating tablet meta task. 
signature=" << req.signature; if (req.signature != -1) { TFinishTaskRequest finish_task_request; finish_task_request.__set_task_status(status.to_thrift()); @@ -1782,7 +1782,7 @@ void create_tablet_callback(StorageEngine& engine, const TAgentTaskRequest& req) } }; DorisMetrics::instance()->create_tablet_requests_total->increment(1); - VLOG_NOTICE << "start to create tablet " << create_tablet_req.tablet_id; + VLOG_NOTICE << "Start creating tablet " << create_tablet_req.tablet_id; std::vector finish_tablet_infos; VLOG_NOTICE << "create tablet: " << create_tablet_req; @@ -2410,7 +2410,7 @@ void calc_delete_bitmap_callback(CloudStorageEngine& engine, const TAgentTaskReq SCOPED_ATTACH_TASK(engine_task.mem_tracker()); if (req.signature != calc_delete_bitmap_req.transaction_id) { // transaction_id may not be the same as req.signature, so add a log here - LOG_INFO("begin to execute calc delete bitmap task") + LOG_INFO("Start executing calc delete bitmap task") .tag("signature", req.signature) .tag("transaction_id", calc_delete_bitmap_req.transaction_id); } @@ -2443,7 +2443,7 @@ void make_cloud_committed_rs_visible_callback(CloudStorageEngine& engine, if (!config::enable_cloud_make_rs_visible_on_be) { return; } - LOG(INFO) << "begin to make cloud tmp rs visible, txn_id=" + LOG(INFO) << "Start making cloud tmp rs visible, txn_id=" << req.make_cloud_tmp_rs_visible_req.txn_id << ", tablet_count=" << req.make_cloud_tmp_rs_visible_req.tablet_ids.size(); diff --git a/be/src/cloud/cloud_compaction_action.cpp b/be/src/cloud/cloud_compaction_action.cpp index d9a7794edca785..322c2b66f05ad2 100644 --- a/be/src/cloud/cloud_compaction_action.cpp +++ b/be/src/cloud/cloud_compaction_action.cpp @@ -120,7 +120,7 @@ Status CloudCompactionAction::_handle_show_compaction(HttpRequest* req, std::str return Status::InternalError("check param failed: missing tablet_id"); } - LOG(INFO) << "begin to handle show compaction, tablet id: " << tablet_id; + LOG(INFO) << "Start handling show compaction, 
tablet id: " << tablet_id; //TabletSharedPtr tablet = _engine.tablet_manager()->get_tablet(tablet_id); CloudTabletSPtr tablet = DORIS_TRY(_engine.tablet_mgr().get_tablet(tablet_id)); @@ -129,7 +129,7 @@ Status CloudCompactionAction::_handle_show_compaction(HttpRequest* req, std::str } tablet->get_compaction_status(json_result); - LOG(INFO) << "finished to handle show compaction, tablet id: " << tablet_id; + LOG(INFO) << "Finished handling show compaction, tablet id: " << tablet_id; return Status::OK(); } @@ -139,7 +139,7 @@ Status CloudCompactionAction::_handle_run_compaction(HttpRequest* req, std::stri uint64_t tablet_id = 0; uint64_t table_id = 0; RETURN_NOT_OK_STATUS_WITH_WARN(_check_param(req, &tablet_id, &table_id), "check param failed"); - LOG(INFO) << "begin to handle run compaction, tablet id: " << tablet_id + LOG(INFO) << "Start handling run compaction, tablet id: " << tablet_id << " table id: " << table_id; // check compaction_type equals 'base' or 'cumulative' @@ -186,7 +186,7 @@ Status CloudCompactionAction::_handle_run_status_compaction(HttpRequest* req, uint64_t tablet_id = 0; RETURN_NOT_OK_STATUS_WITH_WARN(_check_param(req, &tablet_id, TABLET_ID_KEY), "check param failed"); - LOG(INFO) << "begin to handle run status compaction, tablet id: " << tablet_id; + LOG(INFO) << "Start handling run status compaction, tablet id: " << tablet_id; if (tablet_id == 0) { // overall compaction status @@ -233,7 +233,7 @@ Status CloudCompactionAction::_handle_run_status_compaction(HttpRequest* req, // not running any compaction *json_result = absl::Substitute(json_template, run_status, msg, tablet_id, compaction_type); } - LOG(INFO) << "finished to handle run status compaction, tablet id: " << tablet_id; + LOG(INFO) << "Finished handling run status compaction, tablet id: " << tablet_id; return Status::OK(); } diff --git a/be/src/cloud/cloud_delete_task.cpp b/be/src/cloud/cloud_delete_task.cpp index dc3d991df58397..1f5035b4338896 100644 --- 
a/be/src/cloud/cloud_delete_task.cpp +++ b/be/src/cloud/cloud_delete_task.cpp @@ -31,7 +31,7 @@ namespace doris { using namespace ErrorCode; Status CloudDeleteTask::execute(CloudStorageEngine& engine, const TPushReq& request) { - VLOG_DEBUG << "begin to process delete data. request=" << ThriftDebugString(request); + VLOG_DEBUG << "Start processing delete data. request=" << ThriftDebugString(request); if (!request.__isset.transaction_id) { return Status::InvalidArgument("transaction_id is not set"); diff --git a/be/src/cloud/cloud_engine_calc_delete_bitmap_task.cpp b/be/src/cloud/cloud_engine_calc_delete_bitmap_task.cpp index 66510d01d184fb..fd5735b9a85e7d 100644 --- a/be/src/cloud/cloud_engine_calc_delete_bitmap_task.cpp +++ b/be/src/cloud/cloud_engine_calc_delete_bitmap_task.cpp @@ -67,7 +67,7 @@ void CloudEngineCalcDeleteBitmapTask::add_succ_tablet_id(int64_t tablet_id) { Status CloudEngineCalcDeleteBitmapTask::execute() { int64_t transaction_id = _cal_delete_bitmap_req.transaction_id; OlapStopWatch watch; - VLOG_NOTICE << "begin to calculate delete bitmap. transaction_id=" << transaction_id; + VLOG_NOTICE << "Start calculating delete bitmap. transaction_id=" << transaction_id; std::unique_ptr token = _engine.calc_tablet_delete_bitmap_task_thread_pool().new_token( ThreadPool::ExecutionMode::CONCURRENT); @@ -113,7 +113,7 @@ Status CloudEngineCalcDeleteBitmapTask::execute() { // wait for all finished token->wait(); - LOG(INFO) << "finish to calculate delete bitmap on transaction." + LOG(INFO) << "Finished calculating delete bitmap on transaction." 
<< "transaction_id=" << transaction_id << ", cost(us): " << watch.get_elapse_time_us() << ", error_tablet_size=" << _error_tablet_ids->size() << ", res=" << _res.to_string(); @@ -145,7 +145,7 @@ void CloudTabletCalcDeleteBitmapTask::set_tablet_state(int64_t tablet_state) { } Status CloudTabletCalcDeleteBitmapTask::handle() const { - VLOG_DEBUG << "start calculate delete bitmap on tablet " << _tablet_id + VLOG_DEBUG << "Start calculating delete bitmap on tablet " << _tablet_id << ", txn_id=" << _transaction_id; SCOPED_ATTACH_TASK(_mem_tracker); int64_t t1 = MonotonicMicros(); @@ -236,8 +236,8 @@ Status CloudTabletCalcDeleteBitmapTask::handle() const { for (const auto& sub_txn_id : _sub_txn_ids) { ss << sub_txn_id << ", "; } - LOG(INFO) << "start calc delete bitmap for txn_id=" << _transaction_id << ", sub_txn_ids=[" - << ss.str() << "], table_id=" << tablet->table_id() + LOG(INFO) << "Start calculating delete bitmap for txn_id=" << _transaction_id + << ", sub_txn_ids=[" << ss.str() << "], table_id=" << tablet->table_id() << ", partition_id=" << tablet->partition_id() << ", tablet_id=" << _tablet_id << ", start_version=" << _version; std::vector invisible_rowsets; @@ -255,7 +255,7 @@ Status CloudTabletCalcDeleteBitmapTask::handle() const { empty_rowset_count++; continue; } - LOG(INFO) << "start calc delete bitmap for txn_id=" << _transaction_id + LOG(INFO) << "Start calculating delete bitmap for txn_id=" << _transaction_id << ", sub_txn_id=" << sub_txn_id << ", table_id=" << tablet->table_id() << ", partition_id=" << tablet->partition_id() << ", tablet_id=" << _tablet_id << ", start_version=" << _version << ", cur_version=" << version; @@ -283,7 +283,7 @@ Status CloudTabletCalcDeleteBitmapTask::handle() const { } }); auto total_update_delete_bitmap_time_us = MonotonicMicros() - t3; - LOG(INFO) << "finish calculate delete bitmap on tablet" + LOG(INFO) << "Finished calculating delete bitmap on tablet" << ", table_id=" << tablet->table_id() << ", transaction_id=" << 
_transaction_id << ", tablet_id=" << tablet->tablet_id() << ", get_tablet_time_us=" << get_tablet_time_us diff --git a/be/src/cloud/cloud_rowset_writer.cpp b/be/src/cloud/cloud_rowset_writer.cpp index 47cc3865c1bf65..612341f92aac97 100644 --- a/be/src/cloud/cloud_rowset_writer.cpp +++ b/be/src/cloud/cloud_rowset_writer.cpp @@ -82,7 +82,7 @@ Status CloudRowsetWriter::init(const RowsetWriterContext& rowset_writer_context) } Status CloudRowsetWriter::_build_rowset_meta(RowsetMeta* rowset_meta, bool check_segment_num) { - VLOG_NOTICE << "start to build rowset meta. tablet_id=" << rowset_meta->tablet_id() + VLOG_NOTICE << "Start building rowset meta. tablet_id=" << rowset_meta->tablet_id() << ", rowset_id=" << rowset_meta->rowset_id() << ", check_segment_num=" << check_segment_num; // Call base class implementation @@ -162,7 +162,7 @@ Status CloudRowsetWriter::build(RowsetSharedPtr& rowset) { } Status CloudRowsetWriter::_collect_all_packed_slice_locations(RowsetMeta* rowset_meta) { - VLOG_NOTICE << "start to collect packed slice locations for rowset meta. tablet_id=" + VLOG_NOTICE << "Start collecting packed slice locations for rowset meta. tablet_id=" << rowset_meta->tablet_id() << ", rowset_id=" << rowset_meta->rowset_id(); if (!_context.packed_file_active) { return Status::OK(); diff --git a/be/src/cloud/cloud_snapshot_loader.cpp b/be/src/cloud/cloud_snapshot_loader.cpp index 2dae8162c425be..6b697caa75f82d 100644 --- a/be/src/cloud/cloud_snapshot_loader.cpp +++ b/be/src/cloud/cloud_snapshot_loader.cpp @@ -78,7 +78,7 @@ Status CloudSnapshotLoader::download(const std::map& s return Status::InternalError("Storage backend not initialized."); } - LOG(INFO) << "begin to transfer snapshot files. num: " << src_to_dest_path.size() + LOG(INFO) << "Start transferring snapshot files. 
num: " << src_to_dest_path.size() << ", broker addr: " << _broker_addr << ", job: " << _job_id << ", task id: " << _task_id; @@ -187,7 +187,7 @@ Status CloudSnapshotLoader::download(const std::map& s RETURN_IF_ERROR(_engine.cloud_snapshot_mgr().make_snapshot( target_tablet_id, *_storage_resource, file_mapping, true, &hdr_slice)); - LOG(INFO) << "finish to make snapshot for tablet: " << target_tablet_id; + LOG(INFO) << "Finished making snapshot for tablet: " << target_tablet_id; // 1.5. download files for (auto& nested_iter : remote_files) { @@ -202,7 +202,7 @@ Status CloudSnapshotLoader::download(const std::map& s std::string target_file = find->second; std::string full_remote_file = remote_path + "/" + remote_file + "." + file_stat.md5; std::string full_target_file = target_path + "/" + target_file; - LOG(INFO) << "begin to download from " << full_remote_file << " to " + LOG(INFO) << "Start downloading from " << full_remote_file << " to " << full_target_file; io::FileReaderOptions nested_reader_options { .cache_type = io::FileCachePolicy::NO_CACHE, @@ -239,7 +239,7 @@ Status CloudSnapshotLoader::download(const std::map& s // (TODO) Add bvar metrics to track download time } // end for src_to_dest_path - LOG(INFO) << "finished to download snapshots. job: " << _job_id << ", task id: " << _task_id; + LOG(INFO) << "Finished downloading snapshots. 
job: " << _job_id << ", task id: " << _task_id; return status; } diff --git a/be/src/cloud/cloud_tablet.cpp b/be/src/cloud/cloud_tablet.cpp index 2567019301cff5..908bf8bad541a4 100644 --- a/be/src/cloud/cloud_tablet.cpp +++ b/be/src/cloud/cloud_tablet.cpp @@ -493,7 +493,7 @@ void CloudTablet::delete_rowsets(const std::vector& to_delete, uint64_t CloudTablet::delete_expired_stale_rowsets() { if (config::enable_mow_verbose_log) { - LOG_INFO("begin delete_expired_stale_rowset for tablet={}", tablet_id()); + LOG_INFO("Begin to delete_expired_stale_rowset for tablet={}", tablet_id()); } std::vector expired_rowsets; // ATTN: trick, Use stale_rowsets to temporarily increase the reference count of the rowset shared pointer in _stale_rs_version_map so that in the recycle_cached_data function, it checks if the reference count is 2. @@ -554,7 +554,7 @@ uint64_t CloudTablet::delete_expired_stale_rowsets() { manager.recycle_cache(tablet_id(), recycled_rowsets); } if (config::enable_mow_verbose_log) { - LOG_INFO("finish delete_expired_stale_rowset for tablet={}", tablet_id()); + LOG_INFO("Finished delete_expired_stale_rowset for tablet={}", tablet_id()); } add_unused_rowsets(expired_rowsets); diff --git a/be/src/cloud/cloud_tablet_mgr.cpp b/be/src/cloud/cloud_tablet_mgr.cpp index 3e979864138645..1e272db33c4511 100644 --- a/be/src/cloud/cloud_tablet_mgr.cpp +++ b/be/src/cloud/cloud_tablet_mgr.cpp @@ -295,7 +295,7 @@ void CloudTabletMgr::erase_tablet(int64_t tablet_id) { } void CloudTabletMgr::vacuum_stale_rowsets(const CountDownLatch& stop_latch) { - LOG_INFO("begin to vacuum stale rowsets"); + LOG_INFO("Start vacuuming stale rowsets"); std::vector> tablets_to_vacuum; tablets_to_vacuum.reserve(_tablet_map->size()); _tablet_map->traverse([&tablets_to_vacuum](auto&& t) { @@ -311,12 +311,12 @@ void CloudTabletMgr::vacuum_stale_rowsets(const CountDownLatch& stop_latch) { num_vacuumed += t->delete_expired_stale_rowsets(); } - LOG_INFO("finish vacuum stale rowsets") + 
LOG_INFO("Finished vacuuming stale rowsets") .tag("num_vacuumed", num_vacuumed) .tag("num_tablets", tablets_to_vacuum.size()); { - LOG_INFO("begin to remove unused rowsets"); + LOG_INFO("Start removing unused rowsets"); std::vector> tablets_to_remove_unused_rowsets; tablets_to_remove_unused_rowsets.reserve(_tablet_map->size()); _tablet_map->traverse([&tablets_to_remove_unused_rowsets](auto&& t) { @@ -327,7 +327,7 @@ void CloudTabletMgr::vacuum_stale_rowsets(const CountDownLatch& stop_latch) { for (auto& t : tablets_to_remove_unused_rowsets) { t->remove_unused_rowsets(); } - LOG_INFO("finish remove unused rowsets") + LOG_INFO("Finished removing unused rowsets") .tag("num_tablets", tablets_to_remove_unused_rowsets.size()); if (config::enable_check_agg_and_remove_pre_rowsets_delete_bitmap) { int64_t max_useless_rowset_count = 0; @@ -352,7 +352,7 @@ void CloudTabletMgr::vacuum_stale_rowsets(const CountDownLatch& stop_latch) { g_max_rowsets_with_useless_delete_bitmap.set_value(max_useless_rowset_count); g_max_rowsets_with_useless_delete_bitmap_version.set_value( max_useless_rowset_version_count); - LOG(INFO) << "finish check_agg_delete_bitmap_for_stale_rowsets, cost(us)=" + LOG(INFO) << "Finished check_agg_delete_bitmap_for_stale_rowsets, cost(us)=" << watch.get_elapse_time_us() << ". 
max useless rowset count=" << max_useless_rowset_count << ", tablet_id=" << tablet_id_with_max_useless_rowset_count @@ -374,7 +374,7 @@ std::vector> CloudTabletMgr::get_weak_tablets() { } void CloudTabletMgr::sync_tablets(const CountDownLatch& stop_latch) { - LOG_INFO("begin to sync tablets"); + LOG_INFO("Start syncing tablets"); int64_t last_sync_time_bound = ::time(nullptr) - config::tablet_sync_interval_s; auto weak_tablets = get_weak_tablets(); @@ -421,7 +421,7 @@ void CloudTabletMgr::sync_tablets(const CountDownLatch& stop_latch) { } } } - LOG_INFO("finish sync tablets").tag("num_sync", num_sync); + LOG_INFO("Finished syncing tablets").tag("num_sync", num_sync); } Status CloudTabletMgr::get_topn_tablets_to_compact( @@ -523,7 +523,7 @@ Status CloudTabletMgr::get_topn_tablets_to_compact( void CloudTabletMgr::build_all_report_tablets_info(std::map* tablets_info, uint64_t* tablet_num) { DCHECK(tablets_info != nullptr); - VLOG_NOTICE << "begin to build all report cloud tablets info"; + VLOG_NOTICE << "Start building all report cloud tablets info"; HistogramStat tablet_version_num_hist; diff --git a/be/src/exec/operator/olap_scan_operator.cpp b/be/src/exec/operator/olap_scan_operator.cpp index d94d9598dab875..1627408574ec70 100644 --- a/be/src/exec/operator/olap_scan_operator.cpp +++ b/be/src/exec/operator/olap_scan_operator.cpp @@ -774,7 +774,7 @@ Status OlapScanLocalState::prepare(RuntimeState* state) { } if (config::enable_mow_verbose_log && _tablets[i].tablet->enable_unique_key_merge_on_write()) { - LOG_INFO("finish capture_rs_readers for tablet={}, query_id={}", + LOG_INFO("Finished capture_rs_readers for tablet={}, query_id={}", _tablets[i].tablet->tablet_id(), print_id(PipelineXLocalState<>::_state->query_id())); } diff --git a/be/src/exec/scan/scanner_scheduler.h b/be/src/exec/scan/scanner_scheduler.h index de1553b026ed35..f520d97d6dc488 100644 --- a/be/src/exec/scan/scanner_scheduler.h +++ b/be/src/exec/scan/scanner_scheduler.h @@ -160,7 +160,7 @@ class 
ThreadPoolSimplifiedScanScheduler MOCK_REMOVE(final) : public ScannerSched #ifndef BE_TEST stop(); #endif - LOG(INFO) << "Scanner sche " << _sched_name << " shutdown"; + LOG(INFO) << "Scanner scheduler " << _sched_name << " shutdown"; } void stop() override { @@ -260,7 +260,7 @@ class TaskExecutorSimplifiedScanScheduler final : public ScannerScheduler { #ifndef BE_TEST stop(); #endif - LOG(INFO) << "Scanner sche " << _sched_name << " shutdown"; + LOG(INFO) << "Scanner scheduler " << _sched_name << " shutdown"; } void stop() override { diff --git a/be/src/exec/sink/writer/vtablet_writer.cpp b/be/src/exec/sink/writer/vtablet_writer.cpp index 39b23500e063b1..671960739f6eed 100644 --- a/be/src/exec/sink/writer/vtablet_writer.cpp +++ b/be/src/exec/sink/writer/vtablet_writer.cpp @@ -1980,7 +1980,7 @@ Status VTabletWriter::close(Status exec_status) { // print log of add batch time of all node, for tracing load performance easily std::stringstream ss; - ss << "finished to close olap table sink. load_id=" << print_id(_load_id) + ss << "Finished closing olap table sink. load_id=" << print_id(_load_id) << ", txn_id=" << _txn_id << ", node add batch time(ms)/wait execution time(ms)/close time(ms)/num: "; for (auto const& pair : node_add_batch_counter_map) { diff --git a/be/src/exec/sink/writer/vtablet_writer_v2.cpp b/be/src/exec/sink/writer/vtablet_writer_v2.cpp index d978b95fb0bae6..41c5655de5eff3 100644 --- a/be/src/exec/sink/writer/vtablet_writer_v2.cpp +++ b/be/src/exec/sink/writer/vtablet_writer_v2.cpp @@ -751,7 +751,7 @@ Status VTabletWriterV2::close(Status exec_status) { _row_distribution.output_profile_info(_operator_profile); } - LOG(INFO) << "finished to close olap table sink. load_id=" << print_id(_load_id) + LOG(INFO) << "Finished closing olap table sink. 
load_id=" << print_id(_load_id) << ", txn_id=" << _txn_id; } else { _cancel(status); diff --git a/be/src/exprs/function/match.cpp b/be/src/exprs/function/match.cpp index 47f5e95f357e5f..d5ebfbc954a0b9 100644 --- a/be/src/exprs/function/match.cpp +++ b/be/src/exprs/function/match.cpp @@ -124,7 +124,7 @@ Status FunctionMatchBase::execute_impl(FunctionContext* context, Block& block, auto match_query_str = type_ptr->to_string(*column_ptr, 0, format_options); std::string column_name = block.get_by_position(arguments[0]).name; - VLOG_DEBUG << "begin to execute match directly, column_name=" << column_name + VLOG_DEBUG << "Start executing match directly, column_name=" << column_name << ", match_query_str=" << match_query_str; auto* analyzer_ctx = get_match_analyzer_ctx(context); const ColumnPtr source_col = @@ -198,7 +198,7 @@ std::vector FunctionMatchBase::analyse_query_str_token( return query_tokens; } - VLOG_DEBUG << "begin to run " << get_name() << ", parser_type: " + VLOG_DEBUG << "Start running " << get_name() << ", parser_type: " << inverted_index_parser_type_to_string(analyzer_ctx->parser_type); // Decision is based on parser_type (from index properties): @@ -500,7 +500,7 @@ Status FunctionMatchRegexp::execute_match(FunctionContext* context, const std::s ColumnUInt8::Container& result) const { RETURN_IF_ERROR(check(context, name)); - VLOG_DEBUG << "begin to run FunctionMatchRegexp::execute_match, parser_type: " + VLOG_DEBUG << "Start running FunctionMatchRegexp::execute_match, parser_type: " << (analyzer_ctx ? 
inverted_index_parser_type_to_string(analyzer_ctx->parser_type) : "unknown"); diff --git a/be/src/io/cache/cache_lru_dumper.cpp b/be/src/io/cache/cache_lru_dumper.cpp index 43275f5069e614..3b655864099a7b 100644 --- a/be/src/io/cache/cache_lru_dumper.cpp +++ b/be/src/io/cache/cache_lru_dumper.cpp @@ -328,7 +328,8 @@ void CacheLRUDumper::do_dump_queue(LRUQueue& queue, const std::string& queue_nam fmt::format("{}/lru_dump_{}.tail", _mgr->_cache_base_path, queue_name); std::ofstream out(tmp_filename, std::ios::binary); if (out) { - LOG(INFO) << "begin dump " << queue_name << " with " << elements.size() << " elements"; + LOG(INFO) << "Begin to dump " << queue_name << " with " << elements.size() + << " elements"; for (const auto& [hash, offset, size] : elements) { RETURN_IF_STATUS_ERROR(st, dump_one_lru_entry(out, tmp_filename, hash, offset, size)); @@ -492,7 +493,7 @@ void CacheLRUDumper::restore_queue(LRUQueue& queue, const std::string& queue_nam } in.close(); } else { - LOG(INFO) << "no lru dump file is founded for " << queue_name; + LOG(INFO) << "No LRU dump file found for " << queue_name; } LOG(INFO) << "lru restore time costs: " << (duration_ns / 1000) << "us."; }; diff --git a/be/src/io/cache/fs_file_cache_storage.cpp b/be/src/io/cache/fs_file_cache_storage.cpp index aa8782d761ba67..b3bbfad248cadb 100644 --- a/be/src/io/cache/fs_file_cache_storage.cpp +++ b/be/src/io/cache/fs_file_cache_storage.cpp @@ -464,8 +464,8 @@ Status FSFileCacheStorage::upgrade_cache_dir_if_necessary() const { "(LOSING ALL THE CACHE)."; exit(-1); } else if (version == "2.0") { - LOG(INFO) << "Cache will upgrade from 2.0 to 3.0 progressively during running. 2.0 data " - "format will evict eventually."; + VLOG_DEBUG << "Cache will upgrade from 2.0 to 3.0 progressively during running. 
2.0 data " + "format will evict eventually."; return Status::OK(); } else if (version == "3.0") { LOG(INFO) << "Readly 3.0 format, no need to upgrade."; diff --git a/be/src/io/fs/benchmark/base_benchmark.h b/be/src/io/fs/benchmark/base_benchmark.h index 4c75b3ff9e831f..941300bd3f457c 100644 --- a/be/src/io/fs/benchmark/base_benchmark.h +++ b/be/src/io/fs/benchmark/base_benchmark.h @@ -106,7 +106,7 @@ class BaseBenchmark { } Status read(benchmark::State& state, FileReaderSPtr reader) { - bm_log("begin to read {}, thread: {}", _name, state.thread_index()); + bm_log("Start reading {}, thread: {}", _name, state.thread_index()); size_t buffer_size = _conf_map.contains("buffer_size") ? std::stol(_conf_map["buffer_size"]) : 1000000L; std::vector buffer; @@ -150,13 +150,13 @@ class BaseBenchmark { if (status.ok() && reader != nullptr) { status = reader->close(); } - bm_log("finish to read {}, thread: {}, size {}, seconds: {}, status: {}", _name, + bm_log("Finished reading {}, thread: {}, size {}, seconds: {}, status: {}", _name, state.thread_index(), read_size, elapsed_seconds.count(), status); return status; } Status write(benchmark::State& state, FileWriter* writer) { - bm_log("begin to write {}, thread: {}, size: {}", _name, state.thread_index(), _file_size); + bm_log("Start writing {}, thread: {}, size: {}", _name, state.thread_index(), _file_size); size_t write_size = _file_size; size_t buffer_size = _conf_map.contains("buffer_size") ? 
std::stol(_conf_map["buffer_size"]) : 1000000L; @@ -190,7 +190,7 @@ class BaseBenchmark { state.counters["WriteTotal(B)"] = write_size; state.counters["WriteTime(S)"] = elapsed_seconds.count(); - bm_log("finish to write {}, thread: {}, size: {}, seconds: {}, status: {}", _name, + bm_log("Finished writing {}, thread: {}, size: {}, seconds: {}, status: {}", _name, state.thread_index(), write_size, elapsed_seconds.count(), status); return status; } diff --git a/be/src/io/fs/broker_file_system.cpp b/be/src/io/fs/broker_file_system.cpp index 5d9dea297ba3e1..c8fa03a902d435 100644 --- a/be/src/io/fs/broker_file_system.cpp +++ b/be/src/io/fs/broker_file_system.cpp @@ -285,7 +285,7 @@ Status BrokerFileSystem::list_impl(const Path& dir, bool only_file, std::vector< return Status::IOError("failed to list dir {}: {}", dir.native(), error_msg(list_rep.opStatus.message)); } - LOG(INFO) << "finished to list files from remote path. file num: " << list_rep.files.size(); + LOG(INFO) << "Finished listing files from remote path. file num: " << list_rep.files.size(); *exists = true; // split file name and checksum @@ -301,7 +301,7 @@ Status BrokerFileSystem::list_impl(const Path& dir, bool only_file, std::vector< files->emplace_back(std::move(file_info)); } - LOG(INFO) << "finished to split files. valid file num: " << files->size(); + LOG(INFO) << "Finished splitting files. valid file num: " << files->size(); } catch (apache::thrift::TException& e) { std::stringstream ss; ss << "failed to list files in remote path: " << dir << ", msg: " << e.what(); @@ -336,7 +336,7 @@ Status BrokerFileSystem::rename_impl(const Path& orig_name, const Path& new_name new_name.native(), error_msg(e.what())); } - LOG(INFO) << "finished to rename file. orig: " << orig_name << ", new: " << new_name; + LOG(INFO) << "Finished renaming file. 
orig: " << orig_name << ", new: " << new_name; return Status::OK(); } @@ -373,7 +373,7 @@ Status BrokerFileSystem::upload_impl(const Path& local_file, const Path& remote_ // close manually, because we need to check its close status RETURN_IF_ERROR(broker_writer->close()); - LOG(INFO) << "finished to write file via broker. file: " << local_file + LOG(INFO) << "Finished writing file via broker. file: " << local_file << ", length: " << file_len; return Status::OK(); } diff --git a/be/src/io/fs/hdfs_file_system.cpp b/be/src/io/fs/hdfs_file_system.cpp index 7c71603f07c58f..c3fff9f5f2f423 100644 --- a/be/src/io/fs/hdfs_file_system.cpp +++ b/be/src/io/fs/hdfs_file_system.cpp @@ -228,7 +228,7 @@ Status HdfsFileSystem::rename_impl(const Path& orig_name, const Path& new_name) Path normal_new_name = convert_path(new_name, _fs_name); int ret = hdfsRename(_fs_handler->hdfs_fs, normal_orig_name.c_str(), normal_new_name.c_str()); if (ret == 0) { - LOG(INFO) << "finished to rename file. orig: " << normal_orig_name + LOG(INFO) << "Finished renaming file. orig: " << normal_orig_name << ", new: " << normal_new_name; return Status::OK(); } else { diff --git a/be/src/load/delta_writer/push_handler.cpp b/be/src/load/delta_writer/push_handler.cpp index 8996082adfcf38..ee36f9accac50e 100644 --- a/be/src/load/delta_writer/push_handler.cpp +++ b/be/src/load/delta_writer/push_handler.cpp @@ -85,7 +85,7 @@ using namespace ErrorCode; Status PushHandler::process_streaming_ingestion(TabletSharedPtr tablet, const TPushReq& request, PushType push_type, std::vector* tablet_info_vec) { - LOG(INFO) << "begin to realtime push. tablet=" << tablet->tablet_id() + LOG(INFO) << "Start realtime push. 
tablet=" << tablet->tablet_id() << ", transaction_id=" << request.transaction_id; Status res = Status::OK(); @@ -228,7 +228,7 @@ Status PushHandler::_convert_v2(TabletSharedPtr cur_tablet, RowsetSharedPtr* cur load_id.set_lo(0); do { - VLOG_NOTICE << "start to convert delta file."; + VLOG_NOTICE << "Start converting delta file."; // 1. init RowsetBuilder of cur_tablet for current push VLOG_NOTICE << "init rowset builder. tablet=" << cur_tablet->tablet_id() @@ -271,7 +271,7 @@ Status PushHandler::_convert_v2(TabletSharedPtr cur_tablet, RowsetSharedPtr* cur Block block; // 4. Read data from broker and write into cur_tablet - VLOG_NOTICE << "start to convert etl file to delta."; + VLOG_NOTICE << "Start converting ETL file to delta."; while (!reader->eof()) { st = reader->next(&block); if (!st.ok()) { diff --git a/be/src/load/group_commit/group_commit_mgr.cpp b/be/src/load/group_commit/group_commit_mgr.cpp index 6e2862f4297340..fc59fb4a27927c 100644 --- a/be/src/load/group_commit/group_commit_mgr.cpp +++ b/be/src/load/group_commit/group_commit_mgr.cpp @@ -619,7 +619,7 @@ GroupCommitMgr::GroupCommitMgr(ExecEnv* exec_env) : _exec_env(exec_env) { } GroupCommitMgr::~GroupCommitMgr() { - LOG(INFO) << "GroupCommitMgr is destoried"; + LOG(INFO) << "GroupCommitMgr is destroyed"; } void GroupCommitMgr::stop() { diff --git a/be/src/load/group_commit/wal/wal_manager.cpp b/be/src/load/group_commit/wal/wal_manager.cpp index 06d009404f7efa..0538acfdf240f6 100644 --- a/be/src/load/group_commit/wal/wal_manager.cpp +++ b/be/src/load/group_commit/wal/wal_manager.cpp @@ -56,7 +56,7 @@ WalManager::WalManager(ExecEnv* exec_env, const std::string& wal_dir_list) } WalManager::~WalManager() { - LOG(INFO) << "WalManager is destoried"; + LOG(INFO) << "WalManager is destroyed"; } bool WalManager::is_running() { @@ -377,7 +377,7 @@ Status WalManager::_scan_wals(const std::string& wal_path, std::vectorwait_for(l, std::chrono::seconds(180)) == std::cv_status::timeout) { LOG(WARNING) << "wait for " 
<< wal_id << " is time out"; } diff --git a/be/src/load/memtable/memtable_flush_executor.cpp b/be/src/load/memtable/memtable_flush_executor.cpp index 382235d5942cef..8a15ef7d1245b1 100644 --- a/be/src/load/memtable/memtable_flush_executor.cpp +++ b/be/src/load/memtable/memtable_flush_executor.cpp @@ -182,7 +182,7 @@ Status FlushToken::_try_reserve_memory(const std::shared_ptr& r } Status FlushToken::_do_flush_memtable(MemTable* memtable, int32_t segment_id, int64_t* flush_size) { - VLOG_CRITICAL << "begin to flush memtable for tablet: " << memtable->tablet_id() + VLOG_CRITICAL << "Start flushing memtable for tablet: " << memtable->tablet_id() << ", memsize: " << PrettyPrinter::print_bytes(memtable->memory_usage()) << ", rows: " << memtable->stat().raw_rows; memtable->update_mem_type(MemType::FLUSH); diff --git a/be/src/load/routine_load/data_consumer.cpp b/be/src/load/routine_load/data_consumer.cpp index 71e0e85941e168..4738108f27a56a 100644 --- a/be/src/load/routine_load/data_consumer.cpp +++ b/be/src/load/routine_load/data_consumer.cpp @@ -147,7 +147,7 @@ Status KafkaDataConsumer::init(std::shared_ptr ctx) { return Status::InternalError("PAUSE: failed to create kafka consumer: " + errstr); } - VLOG_NOTICE << "finished to init kafka consumer. " << ctx->brief(); + VLOG_NOTICE << "Finished initializing kafka consumer. 
" << ctx->brief(); _init = true; return Status::OK(); diff --git a/be/src/load/routine_load/routine_load_task_executor.cpp b/be/src/load/routine_load/routine_load_task_executor.cpp index 8364de65e037b7..54c4d959d81ede 100644 --- a/be/src/load/routine_load/routine_load_task_executor.cpp +++ b/be/src/load/routine_load/routine_load_task_executor.cpp @@ -389,7 +389,7 @@ void RoutineLoadTaskExecutor::exec_task(std::shared_ptr ctx, } \ } while (false); - LOG(INFO) << "begin to execute routine load task: " << ctx->brief(); + LOG(INFO) << "Start executing routine load task: " << ctx->brief(); // create data consumer group std::shared_ptr consumer_grp; diff --git a/be/src/load/stream_load/stream_load_executor.cpp b/be/src/load/stream_load/stream_load_executor.cpp index 08fc3bb34b15a9..cabcb2a4d38e0d 100644 --- a/be/src/load/stream_load/stream_load_executor.cpp +++ b/be/src/load/stream_load/stream_load_executor.cpp @@ -80,7 +80,7 @@ Status StreamLoadExecutor::execute_plan_fragment( #ifndef BE_TEST ctx->put_result.pipeline_params.query_options.__set_enable_strict_cast(false); ctx->start_write_data_nanos = MonotonicNanos(); - LOG(INFO) << "begin to execute stream load. label=" << ctx->label << ", txn_id=" << ctx->txn_id + LOG(INFO) << "Start executing stream load. label=" << ctx->label << ", txn_id=" << ctx->txn_id << ", query_id=" << ctx->id; Status st; std::shared_ptr is_prepare_success = std::make_shared(false); diff --git a/be/src/runtime/exec_env_init.cpp b/be/src/runtime/exec_env_init.cpp index 3b46ca53cb5085..1fc6e757a841bc 100644 --- a/be/src/runtime/exec_env_init.cpp +++ b/be/src/runtime/exec_env_init.cpp @@ -470,7 +470,7 @@ Status ExecEnv::_init(const std::vector& store_paths, // when user not sepcify a workload group in FE, then query could // use dummy workload group. 
Status ExecEnv::_create_internal_workload_group() { - LOG(INFO) << "begin create internal workload group."; + LOG(INFO) << "Begin to create internal workload group."; RETURN_IF_ERROR(_workload_group_manager->create_internal_wg()); return Status::OK(); @@ -829,7 +829,7 @@ void ExecEnv::destroy() { SAFE_STOP(_external_scan_context_mgr); SAFE_STOP(_fragment_mgr); SAFE_STOP(_runtime_filter_timer_queue); - // NewLoadStreamMgr should be destoried before storage_engine & after fragment_mgr stopped. + // NewLoadStreamMgr should be destroyed before storage_engine & after fragment_mgr stopped. _load_stream_mgr.reset(); _new_load_stream_mgr.reset(); _stream_load_executor.reset(); @@ -838,7 +838,7 @@ void ExecEnv::destroy() { _load_stream_map_pool.reset(); SAFE_STOP(_write_cooldown_meta_executors); - // _id_manager must be destoried before tablet schema cache + // _id_manager must be destroyed before tablet schema cache SAFE_DELETE(_id_manager); // Stop cluster info background worker before storage engine is destroyed @@ -846,7 +846,7 @@ void ExecEnv::destroy() { static_cast(_cluster_info)->stop_bg_worker(); } - // StorageEngine must be destoried before _cache_manager destory + // StorageEngine must be destroyed before _cache_manager is destroyed SAFE_STOP(_storage_engine); _storage_engine.reset(); @@ -882,7 +882,7 @@ void ExecEnv::destroy() { SAFE_DELETE(_tablet_schema_cache); SAFE_DELETE(_tablet_column_object_pool); - // _storage_page_cache must be destoried before _cache_manager + // _storage_page_cache must be destroyed before _cache_manager SAFE_DELETE(_storage_page_cache); SAFE_DELETE(_small_file_mgr); @@ -931,12 +931,12 @@ void ExecEnv::destroy() { SAFE_DELETE(_external_scan_context_mgr); SAFE_DELETE(_user_function_cache); - // cache_manager must be destoried after all cache. + // cache_manager must be destroyed after all caches.
// https://github.com/apache/doris/issues/24082#issuecomment-1712544039 SAFE_DELETE(_cache_manager); _file_cache_open_fd_cache.reset(nullptr); - // _heartbeat_flags must be destoried after staroge engine + // _heartbeat_flags must be destroyed after storage engine SAFE_DELETE(_heartbeat_flags); // Master Info is a thrift object, it could be the last one to deconstruct. @@ -973,7 +973,7 @@ void ExecEnv::destroy() { clear_storage_resource(); PythonServerManager::instance().shutdown(); - LOG(INFO) << "Doris exec envorinment is destoried."; + LOG(INFO) << "Doris exec environment is destroyed."; } } // namespace doris diff --git a/be/src/runtime/small_file_mgr.cpp b/be/src/runtime/small_file_mgr.cpp index bfcded942f3c9b..d3f60a3d6b4bf7 100644 --- a/be/src/runtime/small_file_mgr.cpp +++ b/be/src/runtime/small_file_mgr.cpp @@ -228,7 +228,7 @@ Status SmallFileMgr::_download_file(int64_t file_id, const std::string& md5, *file_path = real_file_path; - LOG(INFO) << "finished to download file: " << file_path; + LOG(INFO) << "Finished downloading file: " << *file_path; return Status::OK(); } diff --git a/be/src/runtime/snapshot_loader.cpp b/be/src/runtime/snapshot_loader.cpp index 6ffcc598d45f7d..5fdda634909352 100644 --- a/be/src/runtime/snapshot_loader.cpp +++ b/be/src/runtime/snapshot_loader.cpp @@ -683,7 +683,7 @@ Status SnapshotHttpDownloader::_delete_orphan_files() { // delete std::string full_local_file = _local_path + "/" + local_file; - LOG(INFO) << "begin to delete local snapshot file: " << full_local_file + LOG(INFO) << "Start deleting local snapshot file: " << full_local_file << ", it does not exist in remote"; if (remove(full_local_file.c_str()) != 0) { LOG(WARNING) << "failed to delete unknown local file: " << full_local_file @@ -787,7 +787,7 @@ Status SnapshotLoader::upload(const std::map& src_to_d if (!_remote_fs) { return Status::InternalError("Storage backend not initialized."); } - LOG(INFO) << "begin to upload snapshot files. 
num: " << src_to_dest_path.size() + LOG(INFO) << "Start uploading snapshot files. num: " << src_to_dest_path.size() << ", broker addr: " << _broker_addr << ", job: " << _job_id << ", task" << _task_id; // check if job has already been cancelled @@ -869,11 +869,11 @@ Status SnapshotLoader::upload(const std::map& src_to_d tablet_files->emplace(tablet_id, local_files_with_checksum); finished_num++; - LOG(INFO) << "finished to write tablet to remote. local path: " << src_path + LOG(INFO) << "Finished writing tablet to remote. local path: " << src_path << ", remote path: " << dest_path; } // end for each tablet path - LOG(INFO) << "finished to upload snapshots. job: " << _job_id << ", task id: " << _task_id; + LOG(INFO) << "Finished uploading snapshots. job: " << _job_id << ", task id: " << _task_id; return status; } @@ -887,7 +887,7 @@ Status SnapshotLoader::download(const std::map& src_to if (!_remote_fs) { return Status::InternalError("Storage backend not initialized."); } - LOG(INFO) << "begin to download snapshot files. num: " << src_to_dest_path.size() + LOG(INFO) << "Start downloading snapshot files. 
num: " << src_to_dest_path.size() << ", broker addr: " << _broker_addr << ", job: " << _job_id << ", task id: " << _task_id; @@ -994,7 +994,7 @@ Status SnapshotLoader::download(const std::map& src_to // we need to replace the tablet_id in remote file name with local tablet id RETURN_IF_ERROR(_replace_tablet_id(remote_file, local_tablet_id, &local_file_name)); std::string full_local_file = local_path + "/" + local_file_name; - LOG(INFO) << "begin to download from " << full_remote_file << " to " << full_local_file; + LOG(INFO) << "Start downloading from " << full_remote_file << " to " << full_local_file; size_t file_len = file_stat.size; // check disk capacity @@ -1026,7 +1026,7 @@ Status SnapshotLoader::download(const std::map& src_to // local_files always keep the updated local files local_files.push_back(local_file_name); - LOG(INFO) << "finished to download file via broker. file: " << full_local_file + LOG(INFO) << "Finished downloading file via broker. file: " << full_local_file << ", length: " << file_len; } // end for all remote files @@ -1049,7 +1049,7 @@ Status SnapshotLoader::download(const std::map& src_to // delete std::string full_local_file = local_path + "/" + local_file; - VLOG_CRITICAL << "begin to delete local snapshot file: " << full_local_file + VLOG_CRITICAL << "Start deleting local snapshot file: " << full_local_file << ", it does not exist in remote"; if (remove(full_local_file.c_str()) != 0) { LOG(WARNING) << "failed to delete unknown local file: " << full_local_file @@ -1060,7 +1060,7 @@ Status SnapshotLoader::download(const std::map& src_to finished_num++; } // end for src_to_dest_path - LOG(INFO) << "finished to download snapshots. job: " << _job_id << ", task id: " << _task_id; + LOG(INFO) << "Finished downloading snapshots. job: " << _job_id << ", task id: " << _task_id; return status; } @@ -1115,7 +1115,7 @@ Status SnapshotLoader::remote_http_download( #endif } - LOG(INFO) << "finished to download snapshots. 
job: " << _job_id << ", task id: " << _task_id; + LOG(INFO) << "Finished downloading snapshots. job: " << _job_id << ", task id: " << _task_id; return status; } @@ -1132,7 +1132,7 @@ Status SnapshotLoader::move(const std::string& snapshot_path, TabletSharedPtr ta auto tablet_path = tablet->tablet_path(); auto store_path = tablet->data_dir()->path(); - LOG(INFO) << "begin to move snapshot files. from: " << snapshot_path << ", to: " << tablet_path + LOG(INFO) << "Start moving snapshot files. from: " << snapshot_path << ", to: " << tablet_path << ", store: " << store_path << ", job: " << _job_id << ", task id: " << _task_id; Status status = Status::OK(); @@ -1278,7 +1278,7 @@ Status SnapshotLoader::move(const std::string& snapshot_path, TabletSharedPtr ta // mark the snapshot path as loaded RETURN_IF_ERROR(write_loaded_tag(snapshot_path, tablet_id)); - LOG(INFO) << "finished to reload header of tablet: " << tablet_id; + LOG(INFO) << "Finished reloading header of tablet: " << tablet_id; return status; } @@ -1373,7 +1373,7 @@ Status SnapshotLoader::_get_existing_files_from_local(const std::string& local_p for (auto& file : files) { local_files->push_back(file.file_name); } - LOG(INFO) << "finished to list files in local path: " << local_path + LOG(INFO) << "Finished listing files in local path: " << local_path << ", file num: " << local_files->size(); return Status::OK(); } diff --git a/be/src/service/http/action/check_tablet_segment_action.cpp b/be/src/service/http/action/check_tablet_segment_action.cpp index e86a7adc074e18..66836215932278 100644 --- a/be/src/service/http/action/check_tablet_segment_action.cpp +++ b/be/src/service/http/action/check_tablet_segment_action.cpp @@ -58,9 +58,9 @@ void CheckTabletSegmentAction::handle(HttpRequest* req) { return; } - LOG(INFO) << "start to check tablet segment."; + LOG(INFO) << "Start checking tablet segment."; std::set bad_tablets = _engine.tablet_manager()->check_all_tablet_segment(repair); - LOG(INFO) << "finish to check 
tablet segment."; + LOG(INFO) << "Finished checking tablet segment."; EasyJson result_ej; result_ej["status"] = "Success"; diff --git a/be/src/service/http/action/compaction_score_action.cpp b/be/src/service/http/action/compaction_score_action.cpp index fd04f65717a86d..16afd9b12497c0 100644 --- a/be/src/service/http/action/compaction_score_action.cpp +++ b/be/src/service/http/action/compaction_score_action.cpp @@ -102,7 +102,7 @@ struct CloudCompactionScoresAccessor final : CompactionScoresAccessor { Status sync_meta() { auto tablets = get_all_tablets(); - LOG(INFO) << "start to sync meta from ms"; + LOG(INFO) << "Start syncing meta from ms"; MonotonicStopWatch stopwatch; stopwatch.start(); diff --git a/be/src/service/http/action/restore_tablet_action.cpp b/be/src/service/http/action/restore_tablet_action.cpp index beeba3449acff9..461d92a725a4f4 100644 --- a/be/src/service/http/action/restore_tablet_action.cpp +++ b/be/src/service/http/action/restore_tablet_action.cpp @@ -104,7 +104,7 @@ Status RestoreTabletAction::_handle(HttpRequest* req) { } else { // set key in map and initialize value as "" _tablet_path_map[key] = ""; - LOG(INFO) << "start to restore tablet_id:" << tablet_id + LOG(INFO) << "Start restoring tablet_id:" << tablet_id << " schema_hash:" << schema_hash; } } diff --git a/be/src/service/http/action/stream_load.cpp b/be/src/service/http/action/stream_load.cpp index 8582ced02a72e9..9278e10a8796ff 100644 --- a/be/src/service/http/action/stream_load.cpp +++ b/be/src/service/http/action/stream_load.cpp @@ -214,7 +214,7 @@ void StreamLoadAction::_send_reply(std::shared_ptr ctx, HttpR HttpChannel::send_reply(req, str); - LOG(INFO) << "finished to execute stream load. label=" << ctx->label + LOG(INFO) << "Finished executing stream load. 
label=" << ctx->label << ", txn_id=" << ctx->txn_id << ", query_id=" << ctx->id << ", load_cost_ms=" << ctx->load_cost_millis << ", receive_data_cost_ms=" << (ctx->receive_and_read_data_cost_nanos - ctx->read_data_cost_nanos) / 1000000 @@ -263,7 +263,7 @@ int StreamLoadAction::on_header(HttpRequest* req) { if (st.ok()) { st = _on_header(req, ctx); - LOG(INFO) << "finished to handle HTTP header, " << ctx->brief(); + LOG(INFO) << "Finished handling HTTP header, " << ctx->brief(); } if (!st.ok()) { ctx->status = std::move(st); diff --git a/be/src/service/http/http_request.cpp b/be/src/service/http/http_request.cpp index 7c38d5b9e300e5..60c680eb24d68e 100644 --- a/be/src/service/http/http_request.cpp +++ b/be/src/service/http/http_request.cpp @@ -186,7 +186,7 @@ void HttpRequest::wait_finish_send_reply() { _handler->free_handler_ctx(_handler_ctx); } - VLOG_NOTICE << "start to wait send reply, infos=" << infos; + VLOG_NOTICE << "Start waiting send reply, infos=" << infos; auto status = _http_reply_future.wait_for(std::chrono::seconds(config::async_reply_timeout_s)); // if request is timeout and can't cancel fragment in time, it will cause some new request block // so we will free cancelled request in time. 
diff --git a/be/src/service/http/utils.cpp b/be/src/service/http/utils.cpp index 7f393131121461..94d59837e35a56 100644 --- a/be/src/service/http/utils.cpp +++ b/be/src/service/http/utils.cpp @@ -321,7 +321,7 @@ Status download_files_v2(const std::string& address, const std::string& token, estimate_timeout = config::download_low_speed_time; } - LOG(INFO) << "begin to download files from " << remote_url << " to " << local_dir + LOG(INFO) << "Start downloading files from " << remote_url << " to " << local_dir << ", file count: " << file_info_list.size() << ", total size: " << batch_file_size << ", timeout: " << estimate_timeout; diff --git a/be/src/storage/compaction/compaction.cpp b/be/src/storage/compaction/compaction.cpp index 698c81f7849fd0..31be45b67df9f9 100644 --- a/be/src/storage/compaction/compaction.cpp +++ b/be/src/storage/compaction/compaction.cpp @@ -360,7 +360,7 @@ Status CompactionMixin::do_compact_ordered_rowsets() { RowsetWriterContext ctx; RETURN_IF_ERROR(construct_output_rowset_writer(ctx)); - LOG(INFO) << "start to do ordered data compaction, tablet=" << _tablet->tablet_id() + LOG(INFO) << "Start doing ordered data compaction, tablet=" << _tablet->tablet_id() << ", output_version=" << _output_version; // link data to new rowset auto seg_id = 0; @@ -1171,7 +1171,7 @@ Status CompactionMixin::update_delete_bitmap() { } rowsets.push_back(rowset); } - LOG(INFO) << "finish update delete bitmap for tablet: " << _tablet->tablet_id() + LOG(INFO) << "Finished updating delete bitmap for tablet: " << _tablet->tablet_id() << ", rowsets: " << _input_rowsets.size() << ", cost: " << watch.get_elapse_time_us() << "(us)"; return Status::OK(); @@ -1197,7 +1197,7 @@ Status CloudCompactionMixin::update_delete_bitmap() { } rowsets.push_back(rowset); } - LOG(INFO) << "finish update delete bitmap for tablet: " << _tablet->tablet_id() + LOG(INFO) << "Finished updating delete bitmap for tablet: " << _tablet->tablet_id() << ", rowsets: " << _input_rowsets.size() << ", cost: 
" << watch.get_elapse_time_us() << "(us)"; return Status::OK(); diff --git a/be/src/storage/compaction/single_replica_compaction.cpp b/be/src/storage/compaction/single_replica_compaction.cpp index a0b36cd417851f..0bac83edf4ccc2 100644 --- a/be/src/storage/compaction/single_replica_compaction.cpp +++ b/be/src/storage/compaction/single_replica_compaction.cpp @@ -282,7 +282,7 @@ bool SingleReplicaCompaction::_find_rowset_to_fetch(const std::vector& Status SingleReplicaCompaction::_fetch_rowset(const TReplicaInfo& addr, const std::string& token, const Version& rowset_version) { - LOG(INFO) << "begin to fetch compaction result, tablet_id=" << _tablet->tablet_id() + LOG(INFO) << "Start fetching compaction result, tablet_id=" << _tablet->tablet_id() << ", addr=" << addr.host << ", version=" << rowset_version; std::shared_lock migration_rlock(tablet()->get_migration_lock(), std::try_to_lock); if (!migration_rlock.owns_lock()) { @@ -581,7 +581,7 @@ Status SingleReplicaCompaction::_finish_clone(const std::string& clone_dir, LOG(WARNING) << "failed to remove=" << clone_dir_path << " msg=" << ec.message(); return Status::IOError("failed to remove {}, due to {}", clone_dir, ec.message()); } - LOG(INFO) << "finish to clone data, clear downloaded data. res=" << res + LOG(INFO) << "Finished cloning data, clear downloaded data. 
res=" << res << ", tablet=" << _tablet->tablet_id() << ", clone_dir=" << clone_dir; return res; } diff --git a/be/src/storage/data_dir.cpp b/be/src/storage/data_dir.cpp index 9b057259a4ebff..a6b1a369d4ecf4 100644 --- a/be/src/storage/data_dir.cpp +++ b/be/src/storage/data_dir.cpp @@ -345,7 +345,7 @@ Status DataDir::_check_incompatible_old_format_tablet() { // TODO(ygl): deal with rowsets and tablets when load failed Status DataDir::load() { - LOG(INFO) << "start to load tablets from " << _path; + LOG(INFO) << "Start loading tablets from " << _path; // load rowset meta from meta env and create rowset // COMMITTED: add to txn manager @@ -356,7 +356,7 @@ Status DataDir::load() { RETURN_IF_ERROR(_check_incompatible_old_format_tablet()); std::vector dir_rowset_metas; - LOG(INFO) << "begin loading rowset from meta"; + LOG(INFO) << "Begin to load rowset from meta"; auto load_rowset_func = [&dir_rowset_metas, this](TabletUid tablet_uid, RowsetId rowset_id, std::string_view meta_str) -> bool { RowsetMetaSharedPtr rowset_meta(new RowsetMeta()); @@ -418,7 +418,7 @@ Status DataDir::load() { // load tablet // create tablet from tablet meta and add it to tablet mgr - LOG(INFO) << "begin loading tablet from meta"; + LOG(INFO) << "Begin to load tablet from meta"; std::set tablet_ids; std::set failed_tablet_ids; auto load_tablet_func = [this, &tablet_ids, &failed_tablet_ids]( @@ -661,7 +661,7 @@ Status DataDir::load() { // At startup, we only count these invalid rowset, but do not actually delete it. // The actual delete operation is in StorageEngine::_clean_unused_rowset_metas, // which is cleaned up uniformly by the background cleanup thread. 
- LOG(INFO) << "finish to load tablets from " << _path + LOG(INFO) << "Finished loading tablets from " << _path << ", total rowset meta: " << dir_rowset_metas.size() << ", invalid rowset num: " << invalid_rowset_counter << ", visible/stale rowsets' delete bitmap count: " << dbm_cnt @@ -814,7 +814,7 @@ void DataDir::perform_path_gc() { return; } - LOG(INFO) << "start to gc data dir " << _path; + LOG(INFO) << "Start GC for data dir " << _path; auto data_path = fmt::format("{}/{}", _path, DATA_PREFIX); std::vector shards; bool exists = true; diff --git a/be/src/storage/index/inverted/inverted_index_reader.cpp b/be/src/storage/index/inverted/inverted_index_reader.cpp index d3b1a67ee33fe2..d5b3bd8e7bcef4 100644 --- a/be/src/storage/index/inverted/inverted_index_reader.cpp +++ b/be/src/storage/index/inverted/inverted_index_reader.cpp @@ -297,7 +297,7 @@ Status FullTextIndexReader::query(const IndexQueryContextPtr& context, SCOPED_RAW_TIMER(&context->stats->inverted_index_query_timer); std::string search_str = *reinterpret_cast(query_value); - VLOG_DEBUG << column_name << " begin to search the fulltext index from clucene, query_str [" + VLOG_DEBUG << "Begin to search the fulltext index from clucene, column_name: " << column_name << ", query_str [" << search_str << "]"; const auto& queryOptions = context->runtime_state->query_options(); @@ -427,7 +427,7 @@ Status StringTypeInvertedIndexReader::query(const IndexQueryContextPtr& context, "query value is too long, evaluate skipped."); } - VLOG_DEBUG << "begin to query the inverted index from clucene" + VLOG_DEBUG << "Start querying the inverted index from clucene" << ", column_name: " << column_name << ", search_str: " << search_str; try { auto index_file_key = _index_file_reader->get_index_file_cache_key(&_index_meta); diff --git a/be/src/storage/olap_server.cpp b/be/src/storage/olap_server.cpp index 498cc8e6c3115d..fba41d4d99441d 100644 --- a/be/src/storage/olap_server.cpp +++ b/be/src/storage/olap_server.cpp @@ -493,7 +493,7 @@ void
StorageEngine::_tablet_checkpoint_callback(const std::vector& dat int64_t interval = config::generate_tablet_meta_checkpoint_tasks_interval_secs; do { for (auto data_dir : data_dirs) { - LOG(INFO) << "begin to produce tablet meta checkpoint tasks, data_dir=" + LOG(INFO) << "Start producing tablet meta checkpoint tasks, data_dir=" << data_dir->path(); auto st = _tablet_meta_checkpoint_thread_pool->submit_func( [data_dir, this]() { _tablet_manager->do_tablet_meta_checkpoint(data_dir); }); @@ -527,7 +527,7 @@ void StorageEngine::_tablet_path_check_callback() { continue; } - LOG(INFO) << "start to check tablet path"; + LOG(INFO) << "Start checking tablet path"; auto all_tablets = _tablet_manager->get_all_tablet( [](Tablet* t) { return t->is_used() && t->tablet_state() == TABLET_RUNNING; }); @@ -762,7 +762,7 @@ void StorageEngine::_update_replica_infos_callback() { #ifdef GOOGLE_PROFILER ProfilerRegisterThread(); #endif - LOG(INFO) << "start to update replica infos!"; + LOG(INFO) << "Start updating replica infos!"; int64_t interval = config::update_replica_infos_interval_seconds; do { @@ -1056,7 +1056,7 @@ Status StorageEngine::_submit_compaction_task(TabletSharedPtr tablet, CompactionType compaction_type, bool force) { if (tablet->tablet_meta()->tablet_schema()->enable_single_replica_compaction() && should_fetch_from_peer(tablet->tablet_id())) { - VLOG_CRITICAL << "start to submit single replica compaction task for tablet: " + VLOG_CRITICAL << "Start submitting single replica compaction task for tablet: " << tablet->tablet_id(); Status st = _submit_single_replica_compaction_task(tablet, compaction_type); if (!st.ok()) { @@ -1363,7 +1363,7 @@ void StorageEngine::_cooldown_tasks_producer_callback() { void StorageEngine::_remove_unused_remote_files_callback() { while (!_stop_background_threads_latch.wait_for( std::chrono::seconds(config::remove_unused_remote_files_interval_sec))) { - LOG(INFO) << "begin to remove unused remote files"; + LOG(INFO) << "Start removing unused 
remote files"; do_remove_unused_remote_files(); } } @@ -1450,7 +1450,7 @@ static void confirm_and_remove_unused_remote_files( std::unordered_map>>& buffer, const int64_t num_files_in_buffer) { TConfirmUnusedRemoteFilesResult result; - LOG(INFO) << "begin to confirm unused remote files. num_tablets=" << buffer.size() + LOG(INFO) << "Start confirming unused remote files. num_tablets=" << buffer.size() << " num_files=" << num_files_in_buffer; auto st = MasterServerClient::instance()->confirm_unused_remote_files(req, &result); if (!st.ok()) { diff --git a/be/src/storage/rowset/beta_rowset.cpp b/be/src/storage/rowset/beta_rowset.cpp index e94430731b17da..889f0b039d9246 100644 --- a/be/src/storage/rowset/beta_rowset.cpp +++ b/be/src/storage/rowset/beta_rowset.cpp @@ -301,7 +301,7 @@ Status BetaRowset::remove() { } // TODO should we close and remove all segment reader first? - VLOG_NOTICE << "begin to remove files in rowset " << rowset_id() + VLOG_NOTICE << "Start removing files in rowset " << rowset_id() << ", version:" << start_version() << "-" << end_version() << ", tabletid:" << _rowset_meta->tablet_id(); // If the rowset was removed, it need to remove the fds in segment cache directly diff --git a/be/src/storage/rowset/rowset_meta_manager.cpp b/be/src/storage/rowset/rowset_meta_manager.cpp index 06aab3314a5750..cac2b9740cfb6a 100644 --- a/be/src/storage/rowset/rowset_meta_manager.cpp +++ b/be/src/storage/rowset/rowset_meta_manager.cpp @@ -466,7 +466,7 @@ Status RowsetMetaManager::_get_all_rowset_binlog_metas(OlapMeta* meta, const Tab Status RowsetMetaManager::remove(OlapMeta* meta, TabletUid tablet_uid, const RowsetId& rowset_id) { std::string key = ROWSET_PREFIX + tablet_uid.to_string() + "_" + rowset_id.to_string(); - VLOG_NOTICE << "start to remove rowset, key:" << key; + VLOG_NOTICE << "Start removing rowset, key:" << key; Status status = meta->remove(META_COLUMN_FAMILY_INDEX, key); VLOG_NOTICE << "remove rowset key:" << key << " finished"; return status; diff 
--git a/be/src/storage/rowset_version_mgr.cpp b/be/src/storage/rowset_version_mgr.cpp index 368d2539a1079b..ca08636d578a18 100644 --- a/be/src/storage/rowset_version_mgr.cpp +++ b/be/src/storage/rowset_version_mgr.cpp @@ -231,9 +231,9 @@ struct GetRowsetsCntl : std::enable_shared_from_this { bthread_attr_t attr = BTHREAD_ATTR_NORMAL; bool succ = call_bthread(tid, &attr, [self = shared_from_this(), &ip, port]() { - LOG(INFO) << "start to get tablet rowsets from peer BE, ip=" << ip; + LOG(INFO) << "Start getting tablet rowsets from peer BE, ip=" << ip; Defer defer_log {[&ip, port]() { - LOG(INFO) << "finish to get rowsets from peer BE, ip=" << ip + LOG(INFO) << "Finished getting rowsets from peer BE, ip=" << ip << ", port=" << port; }}; diff --git a/be/src/storage/schema_change/schema_change.cpp b/be/src/storage/schema_change/schema_change.cpp index e9cfcb830687b9..f115c023c5bd3f 100644 --- a/be/src/storage/schema_change/schema_change.cpp +++ b/be/src/storage/schema_change/schema_change.cpp @@ -821,7 +821,7 @@ Status SchemaChangeJob::process_alter_tablet(const TAlterTabletReqV2& request) { request.new_tablet_id); } - LOG(INFO) << "begin to do request alter tablet: base_tablet_id=" << request.base_tablet_id + LOG(INFO) << "Start doing request alter tablet: base_tablet_id=" << request.base_tablet_id << ", new_tablet_id=" << request.new_tablet_id << ", alter_version=" << request.alter_version; @@ -838,7 +838,7 @@ Status SchemaChangeJob::process_alter_tablet(const TAlterTabletReqV2& request) { } Status res = _do_process_alter_tablet(request); - LOG(INFO) << "finished alter tablet process, res=" << res; + LOG(INFO) << "Finished altering tablet process, res=" << res; DBUG_EXECUTE_IF("SchemaChangeJob::process_alter_tablet.leave.sleep", { sleep(5); }); return res; } @@ -890,7 +890,7 @@ Status SchemaChangeJob::_do_process_alter_tablet(const TAlterTabletReqV2& reques } }); - LOG(INFO) << "finish to validate alter tablet request. 
begin to convert data from base tablet " + LOG(INFO) << "Finished validating alter tablet request. begin to convert data from base tablet " "to new tablet" << " base_tablet=" << _base_tablet->tablet_id() << " new_tablet=" << _new_tablet->tablet_id(); @@ -987,7 +987,7 @@ Status SchemaChangeJob::_do_process_alter_tablet(const TAlterTabletReqV2& reques } // before calculating version_to_be_changed, // remove all data from new tablet, prevent to rewrite data(those double pushed when wait) - LOG(INFO) << "begin to remove all data before end version from new tablet to prevent " + LOG(INFO) << "Start removing all data before end version from new tablet to prevent " "rewrite." << " new_tablet=" << _new_tablet->tablet_id() << ", end_version=" << max_rowset->end_version(); @@ -1203,7 +1203,7 @@ Status SchemaChangeJob::_get_versions_to_be_changed(std::vector* versio // converted from a base tablet, only used for the mow table now. Status SchemaChangeJob::_convert_historical_rowsets(const SchemaChangeParams& sc_params, int64_t* real_alter_version) { - LOG(INFO) << "begin to convert historical rowsets for new_tablet from base_tablet." + LOG(INFO) << "Start converting historical rowsets for new_tablet from base_tablet." << " base_tablet=" << _base_tablet->tablet_id() << ", new_tablet=" << _new_tablet->tablet_id() << ", job_id=" << _job_id; @@ -1241,7 +1241,7 @@ Status SchemaChangeJob::_convert_historical_rowsets(const SchemaChangeParams& sc res = _new_tablet->check_version_integrity(test_version); } - LOG(INFO) << "finish converting rowsets for new_tablet from base_tablet. " + LOG(INFO) << "Finished converting rowsets for new_tablet from base_tablet. 
" << "base_tablet=" << _base_tablet->tablet_id() << ", new_tablet=" << _new_tablet->tablet_id(); return res; diff --git a/be/src/storage/storage_engine.cpp b/be/src/storage/storage_engine.cpp index cb49a5ac149a2c..b336dd6485734e 100644 --- a/be/src/storage/storage_engine.cpp +++ b/be/src/storage/storage_engine.cpp @@ -786,7 +786,7 @@ void StorageEngine::clear_transaction_task(const TTransactionId transaction_id) void StorageEngine::clear_transaction_task(const TTransactionId transaction_id, const std::vector& partition_ids) { - LOG(INFO) << "begin to clear transaction task. transaction_id=" << transaction_id; + LOG(INFO) << "Start clearing transaction task. transaction_id=" << transaction_id; for (const TPartitionId& partition_id : partition_ids) { std::map tablet_infos; @@ -851,7 +851,7 @@ Status StorageEngine::start_trash_sweep(double* usage, bool ignore_guard) { double tmp_usage = 0.0; for (DataDirInfo& info : data_dir_infos) { - LOG(INFO) << "Start to sweep path " << info.path; + LOG(INFO) << "Start sweeping path " << info.path; if (!info.is_used) { continue; } @@ -1116,7 +1116,7 @@ void StorageEngine::_clean_unused_partial_update_info() { void StorageEngine::gc_binlogs(const std::unordered_map& gc_tablet_infos) { for (auto [tablet_id, version] : gc_tablet_infos) { - LOG(INFO) << fmt::format("start to gc binlogs for tablet_id: {}, version: {}", tablet_id, + LOG(INFO) << fmt::format("Start GC of binlogs for tablet_id: {}, version: {}", tablet_id, version); TabletSharedPtr tablet = _tablet_manager->get_tablet(tablet_id); @@ -1228,7 +1228,7 @@ void StorageEngine::_parse_default_rowset_type() { void StorageEngine::start_delete_unused_rowset() { DBUG_EXECUTE_IF("StorageEngine::start_delete_unused_rowset.block", DBUG_BLOCK); - LOG(INFO) << "start to delete unused rowset, size: " << _unused_rowsets.size() + LOG(INFO) << "Start deleting unused rowset, size: " << _unused_rowsets.size() << ", unused delete bitmap size: " << _unused_delete_bitmap.size(); std::vector
unused_rowsets_copy; unused_rowsets_copy.reserve(_unused_rowsets.size()); @@ -1292,8 +1292,7 @@ void StorageEngine::start_delete_unused_rowset() { << due_to_delayed_expired_ts << " rowsets due to delayed expired timestamp. left " << _unused_delete_bitmap.size() << " unused delete bitmap."; for (auto&& rs : unused_rowsets_copy) { - VLOG_NOTICE << "start to remove rowset:" << rs->rowset_id() - << ", version:" << rs->version(); + VLOG_NOTICE << "Start removing rowset:" << rs->rowset_id() << ", version:" << rs->version(); // delete delete_bitmap of unused rowsets if (auto tablet = _tablet_manager->get_tablet(rs->rowset_meta()->tablet_id()); tablet && tablet->enable_unique_key_merge_on_write()) { @@ -1383,7 +1382,7 @@ Status StorageEngine::get_tablet_meta(int64_t tablet_id, TabletMetaSharedPtr* ta Status StorageEngine::obtain_shard_path(TStorageMedium::type storage_medium, int64_t path_hash, std::string* shard_path, DataDir** store, int64_t partition_id) { - LOG(INFO) << "begin to process obtain root path. storage_medium=" << storage_medium; + LOG(INFO) << "Start processing obtain root path. storage_medium=" << storage_medium; if (shard_path == nullptr) { return Status::Error( @@ -1421,7 +1420,7 @@ Status StorageEngine::obtain_shard_path(TStorageMedium::type storage_medium, int Status StorageEngine::load_header(const string& shard_path, const TCloneReq& request, bool restore) { - LOG(INFO) << "begin to process load headers." + LOG(INFO) << "Start processing load headers." 
<< "tablet_id=" << request.tablet_id << ", schema_hash=" << request.schema_hash; Status res = Status::OK(); @@ -1551,7 +1550,7 @@ bool StorageEngine::get_peers_replica_backends(int64_t tablet_id, std::vectorcluster_info(); if (cluster_info == nullptr) { LOG(WARNING) << "Have not get FE Master heartbeat yet"; diff --git a/be/src/storage/tablet/base_tablet.cpp b/be/src/storage/tablet/base_tablet.cpp index 88e71901312bc0..b465dd4da419f8 100644 --- a/be/src/storage/tablet/base_tablet.cpp +++ b/be/src/storage/tablet/base_tablet.cpp @@ -1621,7 +1621,7 @@ void BaseTablet::calc_compaction_output_rowset_delete_bitmap( VLOG_DEBUG << "calc_compaction_output_rowset_delete_bitmap dst location: |" << dst.rowset_id << "|" << dst.segment_id << "|" << dst.row_id << " src location: |" << src.rowset_id << "|" << src.segment_id - << "|" << src.row_id << " start version: " << start_version + << "|" << src.row_id << " start version: " << start_version << "end version" << end_version; if (location_map) { (*location_map)[rowset].emplace_back(src, dst); diff --git a/be/src/storage/tablet/tablet.cpp b/be/src/storage/tablet/tablet.cpp index a7df4d49959375..58de6eb2134ad9 100644 --- a/be/src/storage/tablet/tablet.cpp +++ b/be/src/storage/tablet/tablet.cpp @@ -281,7 +281,7 @@ bool Tablet::set_tablet_schema_into_rowset_meta() { Status Tablet::_init_once_action() { Status res = Status::OK(); - VLOG_NOTICE << "begin to load tablet. tablet=" << tablet_id() + VLOG_NOTICE << "Start loading tablet. tablet=" << tablet_id() << ", version_size=" << _tablet_meta->version_count(); #ifdef BE_TEST @@ -344,7 +344,7 @@ void Tablet::save_meta() { Status Tablet::revise_tablet_meta(const std::vector& to_add, const std::vector& to_delete, bool is_incremental_clone) { - LOG(INFO) << "begin to revise tablet. tablet_id=" << tablet_id(); + LOG(INFO) << "Start revising tablet. tablet_id=" << tablet_id(); // 1.
for incremental clone, we have to add the rowsets first to make it easy to compute // all the delete bitmaps, and it's easy to delete them if we end up with a failure // 2. for full clone, we can calculate delete bitmaps on the cloned rowsets directly. @@ -476,7 +476,7 @@ Status Tablet::revise_tablet_meta(const std::vector& to_add, _tablet_meta->clear_stale_rowset(); save_meta(); - LOG(INFO) << "finish to revise tablet. tablet_id=" << tablet_id(); + LOG(INFO) << "Finished revising tablet. tablet_id=" << tablet_id(); return Status::OK(); } @@ -722,7 +722,7 @@ void Tablet::_delete_stale_rowset_by_version(const Version& version) { void Tablet::delete_expired_stale_rowset() { if (config::enable_mow_verbose_log) { - LOG_INFO("begin delete_expired_stale_rowset for tablet={}", tablet_id()); + LOG_INFO("Begin to delete_expired_stale_rowset for tablet={}", tablet_id()); } int64_t now = UnixSeconds(); std::vector>> deleted_stale_rowsets; @@ -915,7 +915,7 @@ void Tablet::delete_expired_stale_rowset() { } #endif if (config::enable_mow_verbose_log) { - LOG_INFO("finish delete_expired_stale_rowset for tablet={}", tablet_id()); + LOG_INFO("Finished delete_expired_stale_rowset for tablet={}", tablet_id()); } DBUG_EXECUTE_IF("Tablet.delete_expired_stale_rowset.start_delete_unused_rowset", { _engine.start_delete_unused_rowset(); @@ -1503,7 +1503,7 @@ bool Tablet::do_tablet_meta_checkpoint() { << ", tablet=" << tablet_id(); return false; } - VLOG_NOTICE << "start to do tablet meta checkpoint, tablet=" << tablet_id(); + VLOG_NOTICE << "Start doing tablet meta checkpoint, tablet=" << tablet_id(); save_meta(); // if save meta successfully, then should remove the rowset meta existing in tablet // meta from rowset meta store diff --git a/be/src/storage/tablet/tablet_manager.cpp b/be/src/storage/tablet/tablet_manager.cpp index 63abb73adf699e..aa36c1fef417ec 100644 --- a/be/src/storage/tablet/tablet_manager.cpp +++ b/be/src/storage/tablet/tablet_manager.cpp @@ -98,7 +98,7 @@ Status
TabletManager::_add_tablet_unlocked(TTabletId tablet_id, const TabletShar ADD_TIMER(profile, "AddTablet"); } Status res = Status::OK(); - VLOG_NOTICE << "begin to add tablet to TabletManager. " + VLOG_NOTICE << "Start adding tablet to TabletManager. " << "tablet_id=" << tablet_id << ", force=" << force; TabletSharedPtr existed_tablet = nullptr; @@ -253,7 +253,7 @@ Status TabletManager::create_tablet(const TCreateTabletReq& request, std::vector DorisMetrics::instance()->create_tablet_requests_total->increment(1); int64_t tablet_id = request.tablet_id; - LOG(INFO) << "begin to create tablet. tablet_id=" << tablet_id + LOG(INFO) << "Start creating tablet. tablet_id=" << tablet_id << ", table_id=" << request.table_id << ", partition_id=" << request.partition_id << ", replica_id=" << request.replica_id << ", stores.size=" << stores.size() << ", first store=" << stores[0]->path(); @@ -520,7 +520,7 @@ Status TabletManager::drop_tablet(TTabletId tablet_id, TReplicaId replica_id, // Drop specified tablet. Status TabletManager::_drop_tablet(TTabletId tablet_id, TReplicaId replica_id, bool keep_files, bool is_drop_table_or_partition, bool had_held_shard_lock) { - LOG(INFO) << "begin drop tablet. tablet_id=" << tablet_id << ", replica_id=" << replica_id + LOG(INFO) << "Begin to drop tablet. tablet_id=" << tablet_id << ", replica_id=" << replica_id << ", is_drop_table_or_partition=" << is_drop_table_or_partition << ", keep_files=" << keep_files; DorisMetrics::instance()->drop_tablet_requests_total->increment(1); @@ -969,7 +969,7 @@ Status TabletManager::load_tablet_from_meta(DataDir* data_dir, TTabletId tablet_ Status TabletManager::load_tablet_from_dir(DataDir* store, TTabletId tablet_id, SchemaHash schema_hash, const string& schema_hash_path, bool force, bool restore) { - LOG(INFO) << "begin to load tablet from dir. " + LOG(INFO) << "Start loading tablet from dir. 
" << " tablet_id=" << tablet_id << " schema_hash=" << schema_hash << " path = " << schema_hash_path << " force = " << force << " restore = " << restore; // not add lock here, because load_tablet_from_meta already add lock @@ -1065,7 +1065,7 @@ Status TabletManager::load_tablet_from_dir(DataDir* store, TTabletId tablet_id, } Status TabletManager::report_tablet_info(TTabletInfo* tablet_info) { - LOG(INFO) << "begin to process report tablet info." + LOG(INFO) << "Start processing report tablet info." << "tablet_id=" << tablet_info->tablet_id; Status res = Status::OK(); @@ -1082,7 +1082,7 @@ Status TabletManager::report_tablet_info(TTabletInfo* tablet_info) { void TabletManager::build_all_report_tablets_info(std::map* tablets_info) { DCHECK(tablets_info != nullptr); - VLOG_NOTICE << "begin to build all report tablets info"; + VLOG_NOTICE << "Start building all report tablets info"; // build the expired txn map first, outside the tablet map lock std::map> expire_txn_map; @@ -1162,7 +1162,7 @@ Status TabletManager::start_trash_sweep() { g_max_rowsets_with_useless_delete_bitmap.set_value(max_useless_rowset_count); g_max_rowsets_with_useless_delete_bitmap_version.set_value( max_useless_rowset_version_count); - LOG(INFO) << "finish check_agg_delete_bitmap_for_stale_rowsets, cost(us)=" + LOG(INFO) << "Finished check_agg_delete_bitmap_for_stale_rowsetsing, cost(us)=" << watch.get_elapse_time_us() << ". max useless rowset count=" << max_useless_rowset_count << ", tablet_id=" << tablet_id_with_max_useless_rowset_count @@ -1297,7 +1297,7 @@ bool TabletManager::_move_tablet_to_trash(const TabletSharedPtr& tablet) { return false; } int64_t now = MonotonicMicros(); - LOG(INFO) << "start to move tablet to trash. " << tablet_path + LOG(INFO) << "Start moving tablet to trash. " << tablet_path << ". 
rocksdb get meta cost " << (save_meta_ts - get_meta_ts) << " us, rocksdb save meta cost " << (now - save_meta_ts) << " us"; Status rm_st = tablet->data_dir()->move_to_trash(tablet_path); @@ -1443,7 +1443,7 @@ void TabletManager::try_delete_unused_tablet_path(DataDir* data_dir, TTabletId t bool exists = false; Status exists_st = io::global_local_filesystem()->exists(schema_hash_path, &exists); if (exists_st && exists) { - LOG(INFO) << "start to move tablet to trash. tablet_path = " << schema_hash_path; + LOG(INFO) << "Start moving tablet to trash. tablet_path = " << schema_hash_path; Status rm_st = data_dir->move_to_trash(schema_hash_path); if (!rm_st.ok()) { LOG(WARNING) << "fail to move dir to trash. dir=" << schema_hash_path; @@ -1517,7 +1517,7 @@ void TabletManager::do_tablet_meta_checkpoint(DataDir* data_dir) { } } int64_t cost = watch.elapsed_time() / 1000 / 1000; - LOG(INFO) << "finish to do meta checkpoint on dir: " << data_dir->path() + LOG(INFO) << "Finished doing meta checkpoint on dir: " << data_dir->path() << ", number: " << counter << ", cost(ms): " << cost; } @@ -1576,7 +1576,7 @@ Status TabletManager::_create_tablet_meta_unlocked(const TCreateTabletReq& reque } TabletSharedPtr TabletManager::_get_tablet_unlocked(TTabletId tablet_id) { - VLOG_NOTICE << "begin to get tablet. tablet_id=" << tablet_id; + VLOG_NOTICE << "Start getting tablet. tablet_id=" << tablet_id; tablet_map_t& tablet_map = _get_tablet_map(tablet_id); const auto& iter = tablet_map.find(tablet_id); if (iter != tablet_map.end()) { diff --git a/be/src/storage/task/engine_batch_load_task.cpp b/be/src/storage/task/engine_batch_load_task.cpp index 7f2bad13b6b8cf..465e155d97d401 100644 --- a/be/src/storage/task/engine_batch_load_task.cpp +++ b/be/src/storage/task/engine_batch_load_task.cpp @@ -125,7 +125,7 @@ Status EngineBatchLoadTask::_init() { // Check remote path _remote_file_path = _push_req.http_file_path; - LOG(INFO) << "start get file. 
remote_file_path: " << _remote_file_path; + LOG(INFO) << "Start getting file. remote_file_path: " << _remote_file_path; // Set download param string tmp_file_dir; string root_path = tablet->data_dir()->path(); @@ -267,7 +267,7 @@ Status EngineBatchLoadTask::_process() { Status EngineBatchLoadTask::_push(const TPushReq& request, std::vector* tablet_info_vec) { Status res = Status::OK(); - LOG(INFO) << "begin to process push. " + LOG(INFO) << "Start processing push. " << " transaction_id=" << request.transaction_id << " tablet_id=" << request.tablet_id << ", version=" << request.version; @@ -312,7 +312,7 @@ Status EngineBatchLoadTask::_push(const TPushReq& request, Status EngineBatchLoadTask::_delete_data(const TPushReq& request, std::vector* tablet_info_vec) { - VLOG_DEBUG << "begin to process delete data. request=" << ThriftDebugString(request); + VLOG_DEBUG << "Start processing delete data. request=" << ThriftDebugString(request); DorisMetrics::instance()->delete_requests_total->increment(1); Status res = Status::OK(); diff --git a/be/src/storage/task/engine_checksum_task.cpp b/be/src/storage/task/engine_checksum_task.cpp index 01f879aa731076..236a99b4294f5c 100644 --- a/be/src/storage/task/engine_checksum_task.cpp +++ b/be/src/storage/task/engine_checksum_task.cpp @@ -60,7 +60,7 @@ Status EngineChecksumTask::execute() { } // execute Status EngineChecksumTask::_compute_checksum() { - LOG(INFO) << "begin to process compute checksum." + LOG(INFO) << "Start processing compute checksum." << "tablet_id=" << _tablet_id << ", schema_hash=" << _schema_hash << ", version=" << _version; OlapStopWatch watch; diff --git a/be/src/storage/task/engine_clone_task.cpp b/be/src/storage/task/engine_clone_task.cpp index 1404e9930f7912..e193487da17939 100644 --- a/be/src/storage/task/engine_clone_task.cpp +++ b/be/src/storage/task/engine_clone_task.cpp @@ -336,7 +336,7 @@ Status EngineCloneTask::_set_tablet_info() { // we need to check if this cloned table's version is what we expect. 
// if not, maybe this is a stale remaining table which is waiting for drop. // we drop it. - LOG(WARNING) << "begin to drop the stale tablet. tablet_id:" << _clone_req.tablet_id + LOG(WARNING) << "Start dropping the stale tablet. tablet_id:" << _clone_req.tablet_id << ", replica_id:" << _clone_req.replica_id << ", schema_hash:" << _clone_req.schema_hash << ", signature:" << _signature << ", version:" << tablet_info.version @@ -886,7 +886,7 @@ Status EngineCloneTask::_finish_clone(Tablet* tablet, const std::string& clone_d Status EngineCloneTask::_finish_incremental_clone(Tablet* tablet, const TabletMetaSharedPtr& cloned_tablet_meta, int64_t version) { - LOG(INFO) << "begin to finish incremental clone. tablet=" << tablet->tablet_id() + LOG(INFO) << "Start finishing incremental clone. tablet=" << tablet->tablet_id() << ", visible_version=" << version << ", cloned_tablet_replica_id=" << cloned_tablet_meta->replica_id(); @@ -922,7 +922,7 @@ Status EngineCloneTask::_finish_incremental_clone(Tablet* tablet, Status EngineCloneTask::_finish_full_clone(Tablet* tablet, const TabletMetaSharedPtr& cloned_tablet_meta) { Version cloned_max_version = cloned_tablet_meta->max_version(); - LOG(INFO) << "begin to finish full clone. tablet=" << tablet->tablet_id() + LOG(INFO) << "Start finishing full clone. tablet=" << tablet->tablet_id() << ", cloned_max_version=" << cloned_max_version; // Compare the version of local tablet and cloned tablet. diff --git a/be/src/storage/task/engine_publish_version_task.cpp b/be/src/storage/task/engine_publish_version_task.cpp index ced5ac1314a423..2e2179d3adcbcf 100644 --- a/be/src/storage/task/engine_publish_version_task.cpp +++ b/be/src/storage/task/engine_publish_version_task.cpp @@ -99,7 +99,7 @@ Status EnginePublishVersionTask::execute() { Status res = Status::OK(); int64_t transaction_id = _publish_version_req.transaction_id; OlapStopWatch watch; - VLOG_NOTICE << "begin to process publish version. 
transaction_id=" << transaction_id; + VLOG_NOTICE << "Start processing publish version. transaction_id=" << transaction_id; DBUG_EXECUTE_IF("EnginePublishVersionTask.finish.random", { if (rand() % 100 < (100 * dp->param("percent", 0.5))) { LOG_WARNING("EnginePublishVersionTask.finish.random random failed") @@ -169,7 +169,7 @@ Status EnginePublishVersionTask::execute() { for (auto& tablet_rs : tablet_related_rs) { TabletInfo tablet_info = tablet_rs.first; RowsetSharedPtr rowset = tablet_rs.second; - VLOG_CRITICAL << "begin to publish version on tablet. " + VLOG_CRITICAL << "Start publishing version on tablet. " << "tablet_id=" << tablet_info.tablet_id << ", version=" << version.first << ", transaction_id=" << transaction_id; // if rowset is null, it means this be received write task, but failed during write @@ -254,7 +254,7 @@ Status EnginePublishVersionTask::execute() { #ifndef NDEBUG LOG(INFO) << "transaction_id: " << transaction_id << ", partition id: " << partition_id << ", version: " << version.second - << " start to publish version on tablet: " << tablet_info.tablet_id + << " Start publishing version on tablet: " << tablet_info.tablet_id << ", submit status: " << submit_st.code(); #endif CHECK(submit_st.ok()) << submit_st; @@ -316,7 +316,7 @@ Status EnginePublishVersionTask::execute() { _calculate_tbl_num_delta_rows(tablet_id_to_num_delta_rows); if (!res.is()) { - LOG(INFO) << "finish to publish version on transaction." + LOG(INFO) << "Finished publishing version on transaction." 
<< "transaction_id=" << transaction_id << ", cost(us): " << watch.get_elapse_time_us() << ", error_tablet_size=" << _error_tablet_ids->size() diff --git a/be/src/storage/task/engine_storage_migration_task.cpp b/be/src/storage/task/engine_storage_migration_task.cpp index 0704e2cc3d181c..65ed76059261d2 100644 --- a/be/src/storage/task/engine_storage_migration_task.cpp +++ b/be/src/storage/task/engine_storage_migration_task.cpp @@ -203,7 +203,7 @@ bool EngineStorageMigrationTask::_is_rowsets_size_less_than_threshold( Status EngineStorageMigrationTask::_migrate() { int64_t tablet_id = _tablet->tablet_id(); - LOG(INFO) << "begin to process tablet migrate. " + LOG(INFO) << "Start processing tablet migrate. " << "tablet_id=" << tablet_id << ", dest_store=" << _dest_store->path(); RETURN_IF_ERROR(_engine.tablet_manager()->register_transition_tablet(_tablet->tablet_id(), diff --git a/be/src/storage/task/index_builder.cpp b/be/src/storage/task/index_builder.cpp index b7e2db3b4b68f6..95cadd3ac42a4f 100644 --- a/be/src/storage/task/index_builder.cpp +++ b/be/src/storage/task/index_builder.cpp @@ -64,7 +64,7 @@ Status IndexBuilder::init() { Status IndexBuilder::update_inverted_index_info() { // just do link files - LOG(INFO) << "begin to update_inverted_index_info, tablet=" << _tablet->tablet_id() + LOG(INFO) << "Start updating_inverted_index_info, tablet=" << _tablet->tablet_id() << ", is_drop_op=" << _is_drop_op; // index ids that will not be linked std::set without_index_uids; @@ -662,7 +662,7 @@ Status IndexBuilder::handle_single_rowset(RowsetMetaSharedPtr output_rowset_meta Status IndexBuilder::_write_inverted_index_data(TabletSchemaSPtr tablet_schema, int64_t segment_idx, Block* block) { - VLOG_DEBUG << "begin to write inverted/ann index"; + VLOG_DEBUG << "Start writing inverted/ann index"; // converter block data _olap_data_convertor->set_source_content(block, 0, block->rows()); for (auto i = 0; i < _alter_inverted_indexes.size(); ++i) { @@ -809,7 +809,7 @@ Status 
IndexBuilder::_add_data(const std::string& column_name, } Status IndexBuilder::handle_inverted_index_data() { - LOG(INFO) << "begin to handle_inverted_index_data"; + LOG(INFO) << "Begin to handle_inverted_index_data"; DCHECK(_input_rowsets.size() == _output_rowsets.size()); for (auto& _output_rowset : _output_rowsets) { SegmentCacheHandle segment_cache_handle; @@ -823,7 +823,7 @@ Status IndexBuilder::handle_inverted_index_data() { } Status IndexBuilder::do_build_inverted_index() { - LOG(INFO) << "begin to do_build_inverted_index, tablet=" << _tablet->tablet_id() + LOG(INFO) << "Begin to do_build_inverted_index, tablet=" << _tablet->tablet_id() << ", is_drop_op=" << _is_drop_op; DBUG_EXECUTE_IF("IndexBuilder::do_build_inverted_index_alter_inverted_indexes_empty", { _alter_inverted_indexes.clear(); }) diff --git a/be/src/util/jni-util.cpp b/be/src/util/jni-util.cpp index 9f5f3999c17579..edf0e192756415 100644 --- a/be/src/util/jni-util.cpp +++ b/be/src/util/jni-util.cpp @@ -107,7 +107,7 @@ const std::string GetKerb5ConfPath() { libhdfs_opts += fmt::format(" -Djdk.lang.processReaperUseDefaultStackSize={}", config::jdk_process_reaper_use_default_stack_size); setenv("LIBHDFS_OPTS", libhdfs_opts.c_str(), 1); - LOG(INFO) << "set final LIBHDFS_OPTS: " << libhdfs_opts; + LOG(INFO) << "Set final LIBHDFS_OPTS: " << libhdfs_opts; } // Only used on non-x86 platform diff --git a/be/src/util/pprof_utils.cpp b/be/src/util/pprof_utils.cpp index 8f8cd7578e4f2e..4ff7f3242123ab 100644 --- a/be/src/util/pprof_utils.cpp +++ b/be/src/util/pprof_utils.cpp @@ -113,7 +113,7 @@ Status PprofUtils::get_readable_profile(const std::string& file_or_content, bool std::string cmd_output; std::string final_cmd = pprof_cmd + absl::Substitute(" --text $0 $1", self_cmdline, final_file); AgentUtils util; - LOG(INFO) << "begin to run command: " << final_cmd; + LOG(INFO) << "Start running command: " << final_cmd; bool rc = util.exec_cmd(final_cmd, &cmd_output, false); // delete raw file @@ -158,7 +158,7 @@
Status PprofUtils::generate_flamegraph(int32_t sample_seconds, AgentUtils util; std::string cmd_output; - LOG(INFO) << "begin to run command: " << cmd.str(); + LOG(INFO) << "Start running command: " << cmd.str(); bool rc = util.exec_cmd(cmd.str(), &cmd_output); if (!rc) { static_cast(io::global_local_filesystem()->delete_file(tmp_file.str())); @@ -175,7 +175,7 @@ Status PprofUtils::generate_flamegraph(int32_t sample_seconds, std::stringstream gen_cmd; gen_cmd << perf_cmd << " script -i " << tmp_file.str() << " | " << stackcollapse_perf_pl << " | " << flamegraph_pl << " > " << graph_file.str(); - LOG(INFO) << "begin to run command: " << gen_cmd.str(); + LOG(INFO) << "Start running command: " << gen_cmd.str(); rc = util.exec_cmd(gen_cmd.str(), &res_content); if (!rc) { static_cast(io::global_local_filesystem()->delete_file(tmp_file.str())); @@ -187,7 +187,7 @@ Status PprofUtils::generate_flamegraph(int32_t sample_seconds, std::stringstream gen_cmd; gen_cmd << perf_cmd << " script -i " << tmp_file.str() << " | " << stackcollapse_perf_pl << " | " << flamegraph_pl; - LOG(INFO) << "begin to run command: " << gen_cmd.str(); + LOG(INFO) << "Start running command: " << gen_cmd.str(); rc = util.exec_cmd(gen_cmd.str(), &res_content, false); if (!rc) { static_cast(io::global_local_filesystem()->delete_file(tmp_file.str())); diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java index 3252dcde68dbca..f80afcc0208079 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java @@ -251,7 +251,7 @@ public void processCreateMaterializedView(CreateMaterializedViewCommand createMv olapTable.setState(OlapTableState.ROLLUP); Env.getCurrentEnv().getEditLog().logAlterJob(rollupJobV2); - LOG.info("finished to create materialized view job: {}", rollupJobV2.getJobId()); 
+ LOG.info("Finished creating materialized view job: {}", rollupJobV2.getJobId()); } finally { if (olapTable.getState() != OlapTableState.ROLLUP) { // state is not ROLLUP, means encountered some exception before jobs submitted, @@ -352,7 +352,7 @@ public void processBatchAddRollup(String rawSql, List alterOps, Databas BatchAlterJobPersistInfo batchAlterJobV2 = new BatchAlterJobPersistInfo(rollupJobV2List); Env.getCurrentEnv().getEditLog().logBatchAlterJob(batchAlterJobV2); - LOG.info("finished to create materialized view job: {}", logJobIdSet); + LOG.info("Finished creating materialized view job: {}", logJobIdSet); } catch (Exception e) { // remove tablet which has already inserted into TabletInvertedIndex @@ -513,7 +513,7 @@ private RollupJobV2 createMaterializedViewJob(String rawSql, String mvName, Stri } } // end for partitions - LOG.info("finished to create materialized view job: {}", mvJob.getJobId()); + LOG.info("Finished creating materialized view job: {}", mvJob.getJobId()); return mvJob; } @@ -986,7 +986,7 @@ public void processBatchDropRollup(List alterOps, Database db, OlapTabl String tableName = olapTable.getName(); editLog.logBatchDropRollup(new BatchDropInfo(dbId, tableId, tableName, rollupNameMap)); deleteIndexList = rollupNameMap.keySet().stream().collect(Collectors.toList()); - LOG.info("finished drop rollup index[{}] in table[{}]", + LOG.info("Finished dropping rollup index[{}] in table[{}]", String.join("", rollupNameMap.values()), olapTable.getName()); } finally { olapTable.writeUnlock(); @@ -1010,7 +1010,7 @@ public void processDropMaterializedView(DropMaterializedViewCommand dropMaterial editLog.logDropRollup(new DropInfo(db.getId(), olapTable.getId(), olapTable.getName(), mvIndexId, mvName, false, false, 0)); deleteIndexList.add(mvIndexId); - LOG.info("finished drop materialized view [{}] in table [{}]", mvName, olapTable.getName()); + LOG.info("Finished dropping materialized view [{}] in table [{}]", mvName, olapTable.getName()); } catch 
(MetaNotFoundException e) { if (dropMaterializedViewCommand.isIfExists()) { LOG.info(e.getMessage()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java index 27a1fdcc8df136..f4cf38d209cb79 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java @@ -339,7 +339,7 @@ protected void createRollupReplica() throws AlterCancelException { protected void runPendingJob() throws Exception { Preconditions.checkState(jobState == JobState.PENDING, jobState); - LOG.info("begin to send create rollup replica tasks. job: {}", jobId); + LOG.info("Begin to send create rollup replica tasks. job: {}", jobId); Database db = Env.getCurrentInternalCatalog() .getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist")); if (!checkTableStable(db)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java index 818cd47ac82cfd..cab6b588018027 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java @@ -1938,7 +1938,7 @@ private void createJob(String rawSql, long dbId, OlapTable olapTable, Map indexNameToId = new HashMap<>(olapTable.getIndexNameToId()); @@ -3288,7 +3288,7 @@ public void modifyTableLightSchemaChange(String rawSql, Database db, OlapTable o Env.getCurrentEnv().getAnalysisManager().dropStats(olapTable, null); } } - LOG.info("finished modify table's add or drop or modify columns. table: {}, job: {}, is replay: {}", + LOG.info("Finished modifying table's add or drop or modify columns. 
table: {}, job: {}, is replay: {}", olapTable.getName(), jobId, isReplay); } // for bloom filter, rebuild bloom filter info by table schema in replay @@ -3507,7 +3507,7 @@ private String getCloudClusterName() throws UserException { public void buildOrDeleteTableInvertedIndices(Database db, OlapTable olapTable, Map> indexSchemaMap, List alterIndexes, Map> invertedIndexOnPartitions, boolean isDropOp) throws UserException { - LOG.info("begin to build table's inverted index. table: {}", olapTable.getName()); + LOG.info("Begin to build table's inverted index. table: {}", olapTable.getName()); // for now table's state can only be NORMAL Preconditions.checkState(olapTable.getState() == OlapTableState.NORMAL, olapTable.getState().name()); @@ -3579,7 +3579,7 @@ public void buildOrDeleteTableInvertedIndices(Database db, OlapTable olapTable, if (!FeConstants.runningUnitTest) { Env.getCurrentEnv().getEditLog().logIndexChangeJob(indexChangeJob); } - LOG.info("finish create table's inverted index job. table: {}, partition: {}, job: {}", + LOG.info("Finished creating table's inverted index job. table: {}, partition: {}, job: {}", olapTable.getName(), partitionName, jobId); } // end for partition } // end for index @@ -3704,7 +3704,7 @@ public boolean updateBinlogConfig(Database db, OlapTable olapTable, List new AlterCancelException("Database " + s + " does not exist")); diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java index 1ae0e75ac10534..f0cf69e5cc4ca5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java @@ -573,7 +573,7 @@ private void backup(Repository repository, Database db, BackupCommand command) t // must put to dbIdToBackupOrRestoreJob after edit log, otherwise the state of job may be changed. 
addBackupOrRestoreJob(db.getId(), backupJob); - LOG.info("finished to submit backup job: {}", backupJob); + LOG.info("Finished submitting backup job: {}", backupJob); } public void restore(Repository repository, Database db, RestoreCommand command) throws DdlException { @@ -651,7 +651,7 @@ public void restore(Repository repository, Database db, RestoreCommand command) // must put to dbIdToBackupOrRestoreJob after edit log, otherwise the state of job may be changed. addBackupOrRestoreJob(db.getId(), restoreJob); - LOG.info("finished to submit restore job: {}", restoreJob); + LOG.info("Finished submitting restore job: {}", restoreJob); } private void addBackupOrRestoreJob(long dbId, AbstractJob job) { @@ -818,7 +818,7 @@ public void cancel(CancelBackupCommand command) throws DdlException { ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, "Failed to cancel job: " + status.getErrMsg()); } - LOG.info("finished to cancel {} job: {}", (command.isRestore() ? "restore" : "backup"), job); + LOG.info("Finished cancelling {} job: {}", (command.isRestore() ? "restore" : "backup"), job); } public boolean handleFinishedSnapshotTask(SnapshotTask task, TFinishTaskRequest request) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java index a64a4a98f5d297..df299ec25aa9f3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java @@ -440,9 +440,9 @@ public synchronized Status updateRepo(Repository repo) { repo.getRemoteFileSystem().getStorageProperties().getBackendConfigProperties()); AgentTaskQueue.updateTask(beId, TTaskType.UPLOAD, signature, task); } - LOG.info("finished to update upload job properties. {}", this); + LOG.info("Finished updating upload job properties. {}", this); } - LOG.info("finished to update repo of job. {}", this); + LOG.info("Finished updating repo of job. 
{}", this); return Status.OK; } @@ -621,7 +621,7 @@ private void prepareAndSendSnapshotTask() { // DO NOT write log here, state will be reset to PENDING after FE restart. Then all snapshot tasks // will be re-generated and be sent again - LOG.info("finished to send snapshot tasks to backend. {}", this); + LOG.info("Finished sending snapshot tasks to backend. {}", this); } private void checkOlapTable(OlapTable olapTable, TableRefInfo backupTableRef) { @@ -764,7 +764,7 @@ private void waitingAllSnapshotsFinished() { // log env.getEditLog().logBackupJob(this); - LOG.info("finished to make snapshots. {}", this); + LOG.info("Finished making snapshots. {}", this); return; } @@ -837,7 +837,7 @@ private void uploadSnapshot() { state = BackupJobState.UPLOADING; // DO NOT write log here, upload tasks will be resend after FE crashed. - LOG.info("finished to send upload tasks. {}", this); + LOG.info("Finished sending upload tasks. {}", this); } private void waitingAllUploadingFinished() { @@ -847,7 +847,7 @@ private void waitingAllUploadingFinished() { // log env.getEditLog().logBackupJob(this); - LOG.info("finished uploading snapshots. {}", this); + LOG.info("Finished uploading snapshots. {}", this); return; } @@ -993,7 +993,7 @@ private void saveMetaInfo(boolean replay) { // log env.getEditLog().logBackupJob(this); - LOG.info("finished to save meta the backup job info file to local.[{}], [{}] {}", + LOG.info("Finished saving meta backup job info file to local.[{}], [{}] {}", localMetaInfoFilePath, localJobInfoFilePath, this); } @@ -1124,7 +1124,7 @@ private void cancelInternal() { // log env.getEditLog().logBackupJob(this); - LOG.info("finished to cancel backup job. current state: {}. {}", curState.name(), this); + LOG.info("Finished cancelling backup job. current state: {}. 
{}", curState.name(), this); } public boolean isLocalSnapshot() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java b/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java index cac9b9ae1d4ac3..1846bdff0dcd19 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java @@ -565,7 +565,7 @@ public Status upload(String localFilePath, String remoteFilePath) { } } - LOG.info("finished to upload local file {} to remote file: {}", localFilePath, finalRemotePath); + LOG.info("Finished uploading local file {} to remote file: {}", localFilePath, finalRemotePath); return st; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java index 0395378de476e4..830c19283b0cd8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java @@ -448,9 +448,9 @@ public synchronized Status updateRepo(Repository repo) { repo.getRemoteFileSystem().getStorageProperties().getBackendConfigProperties()); AgentTaskQueue.updateTask(beId, TTaskType.DOWNLOAD, signature, task); } - LOG.info("finished to update download job properties. {}", this); + LOG.info("Finished updating download job properties. {}", this); } - LOG.info("finished to update repo of job. {}", this); + LOG.info("Finished updating repo of job. {}", this); return Status.OK; } @@ -963,7 +963,7 @@ private void checkAndPrepareMeta() { } if (LOG.isDebugEnabled()) { - LOG.debug("finished to prepare restored partitions and tables. {}", this); + LOG.debug("Finished preparing restored partitions and tables. {}", this); } // for now, nothing is modified in catalog @@ -1011,7 +1011,7 @@ private void checkAndPrepareMeta() { } if (LOG.isDebugEnabled()) { - LOG.debug("finished to generate create replica tasks. 
{}", this); + LOG.debug("Finished generating create replica tasks. {}", this); } } finally { db.readUnlock(); @@ -1027,7 +1027,7 @@ private void checkAndPrepareMeta() { return; } if (LOG.isDebugEnabled()) { - LOG.debug("finished to restore resources. {}", this.jobId); + LOG.debug("Finished restoring resources. {}", this.jobId); } doCreateReplicas(); @@ -1047,7 +1047,7 @@ protected void doCreateReplicas() { .sum(); createReplicaTasksLatch = new MarkedCountDownLatch<>(numBatchTasks); if (numBatchTasks > 0) { - LOG.info("begin to send create replica tasks to BE for restore. total {} tasks. {}", + LOG.info("Begin to send create replica tasks to BE for restore. total {} tasks. {}", numBatchTasks, this); for (AgentBatchTask batchTask : batchTaskPerTable.values()) { for (AgentTask task : batchTask.getAllTasks()) { @@ -1106,7 +1106,7 @@ protected void waitingAllReplicasCreated() { } if (LOG.isDebugEnabled()) { - LOG.debug("finished to create all restored replicas. {}", this); + LOG.debug("Finished creating all restored replicas. {}", this); } allReplicasCreated(); } @@ -1171,7 +1171,7 @@ protected void allReplicasCreated() { } } - LOG.info("finished to prepare meta. {}", this); + LOG.info("Finished preparing meta. {}", this); if (jobInfo.content == null || jobInfo.content == BackupCommand.BackupContent.ALL) { prepareAndSendSnapshotTaskForOlapTable(db); @@ -1266,7 +1266,7 @@ private Status bindLocalAndRemoteOlapTableReplicas( } protected void prepareAndSendSnapshotTaskForOlapTable(Database db) { - LOG.info("begin to make snapshot. {} when restore content is ALL", this); + LOG.info("Begin to make snapshot. {} when restore content is ALL", this); // begin to make snapshots for all replicas // snapshot is for incremental download unfinishedSignatureToId.clear(); @@ -1317,7 +1317,7 @@ protected void prepareAndSendSnapshotTaskForOlapTable(Database db) { } AgentTaskExecutor.submit(batchTask); - LOG.info("finished to send snapshot tasks, num: {}. 
{}", batchTask.getTaskNum(), this); + LOG.info("Finished sending snapshot tasks, num: {}. {}", batchTask.getTaskNum(), this); } private void checkAndRestoreResources() { @@ -1751,7 +1751,7 @@ protected void waitingAllSnapshotsFinished() { for (ColocatePersistInfo info : colocatePersistInfos) { env.getEditLog().logColocateAddTable(info); } - LOG.info("finished making snapshots. {}", this); + LOG.info("Finished making snapshots. {}", this); return; } @@ -1871,7 +1871,7 @@ protected void downloadRemoteSnapshots() { setState(RestoreJobState.DOWNLOADING); // No edit log here - LOG.info("finished to send download tasks to BE. num: {}. {}", batchTask.getTaskNum(), this); + LOG.info("Finished sending download tasks to BE. num: {}. {}", batchTask.getTaskNum(), this); } protected void downloadLocalSnapshots() { @@ -1996,7 +1996,7 @@ protected void downloadLocalSnapshots() { setState(RestoreJobState.DOWNLOADING); // No edit log here - LOG.info("finished to send download tasks to BE. num: {}. {}", batchTask.getTaskNum(), this); + LOG.info("Finished sending download tasks to BE. num: {}. {}", batchTask.getTaskNum(), this); } protected DownloadTask createDownloadTask(long beId, long signature, long jobId, long dbId, @@ -2059,7 +2059,7 @@ private void waitingAllDownloadFinished() { backupMeta = null; env.getEditLog().logRestoreJob(this); - LOG.info("finished to download. {}", this); + LOG.info("Finished downloading. {}", this); } LOG.info("waiting {} tasks to finish downloading from repo. {}", unfinishedSignatureToId.size(), this); @@ -2088,13 +2088,13 @@ protected void commit() { setState(RestoreJobState.COMMITTING); // No log here - LOG.info("finished to send move dir tasks. num: {}. {}", batchTask.getTaskNum(), this); + LOG.info("Finished sending move dir tasks. num: {}. {}", batchTask.getTaskNum(), this); return; } protected void waitingAllTabletsCommitted() { if (unfinishedSignatureToId.isEmpty()) { - LOG.info("finished to commit all tablet. 
{}", this); + LOG.info("Finished committing all tablets. {}", this); Status st = allTabletCommitted(false /* not replay */); if (!st.ok()) { status = st; @@ -2438,7 +2438,7 @@ private void cancelInternal(boolean isReplay) { } colocatePersistInfos.clear(); - LOG.info("finished to cancel restore job. current state: {}. is replay: {}. {}", + LOG.info("Finished cancelling restore job. current state: {}. is replay: {}. {}", curState.name(), isReplay, this); // Send release snapshot tasks after log restore job, so that the snapshot won't be released @@ -2447,7 +2447,7 @@ private void cancelInternal(boolean isReplay) { return; } - LOG.info("finished to cancel restore job. is replay: {}. {}", isReplay, this); + LOG.info("Finished cancelling restore job. is replay: {}. {}", isReplay, this); } protected void cleanMetaObjects(boolean isReplay) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogGcer.java b/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogGcer.java index bcee32c148daa5..dc6e33080beb48 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogGcer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogGcer.java @@ -60,7 +60,7 @@ protected void runAfterCatalogReady() { if (tombstones != null && !tombstones.isEmpty()) { LOG.info("tombstones size: {}", tombstones.size()); } else { - LOG.info("no gc binlog"); + LOG.info("No GC binlog"); return; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogManager.java b/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogManager.java index 0d17efdb6de861..a01599a07a72a9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogManager.java @@ -651,7 +651,7 @@ public List> getDroppedIndexes(long dbId) { } public List gc() { - LOG.info("begin gc binlog"); + LOG.info("Begin to gc binlog"); lock.writeLock().lock(); Map gcDbBinlogMap; @@ -662,7 +662,7 @@ public List gc() { } if 
(gcDbBinlogMap.isEmpty()) { - LOG.info("gc binlog, dbBinlogMap is null"); + LOG.info("GC binlog, dbBinlogMap is null"); return null; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/CloudTabletStatMgr.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/CloudTabletStatMgr.java index 18da6784acfc29..9adda275fde6cd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/CloudTabletStatMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/CloudTabletStatMgr.java @@ -220,7 +220,7 @@ private List getAllTabletStats(Function filter) { LOG.error("Error waiting for get tablet stats tasks to complete", e); } - LOG.info("finished to get tablet stats. getStatsTabletNum: {}, cost: {} ms", + LOG.info("Finished getting tablet stats. getStatsTabletNum: {}, cost: {} ms", getStatsTabletNum, (System.currentTimeMillis() - start)); return dbIds; } @@ -258,7 +258,7 @@ private void getActiveTabletStats(Set tablets) { } catch (InterruptedException | ExecutionException e) { LOG.error("Error waiting for get tablet stats tasks to complete", e); } - LOG.info("finished to get {} active tablets stats, cost {}ms", activeTabletNum, + LOG.info("Finished getting {} active tablets stats, cost {}ms", activeTabletNum, System.currentTimeMillis() - start); } @@ -417,7 +417,7 @@ private void updateStatInfo(List dbIds) { tableReplicaCount, tableRowCount, tableRowsetCount, tableSegmentCount, tableTotalLocalIndexSize, tableTotalLocalSegmentSize, 0L, 0L); olapTable.setStatistics(tableStats); - LOG.debug("finished to set row num for table: {} in database: {}", + LOG.debug("Finished setting row num for table: {} in database: {}", table.getName(), db.getFullName()); } finally { table.readUnlock(); @@ -466,7 +466,7 @@ private void updateStatInfo(List dbIds) { + ", avg table byte size=" + avgTableSize); } - LOG.info("finished to update index row num of all databases. cost: {} ms", + LOG.info("Finished updating index row num of all databases. 
cost: {} ms", (System.currentTimeMillis() - start)); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java index 3e26d7ee146aa6..ea2346df73da93 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java @@ -841,7 +841,7 @@ public synchronized void dropFunctionImpl(FunctionSearchDesc function, boolean i if (FunctionUtil.dropFunctionImpl(function, ifExists, name2Function)) { Env.getCurrentEnv().getEditLog().logDropFunction(function); FunctionUtil.dropFromNereids(this.getFullName(), function); - LOG.info("finished to drop function {}", function.getName().getFunction()); + LOG.info("Finished dropping function {}", function.getName().getFunction()); } } @@ -854,7 +854,7 @@ public synchronized void replayDropFunction(FunctionSearchDesc functionSearchDes functionSearchDesc.getName().getFunction()); } FunctionUtil.dropFromNereids(this.getFullName(), functionSearchDesc); - LOG.info("finished to replay drop function {}", functionSearchDesc.getName().getFunction()); + LOG.info("Finished replaying drop function {}", functionSearchDesc.getName().getFunction()); } catch (UserException e) { throw new RuntimeException(e); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/DomainResolver.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/DomainResolver.java index f5d39a8e8d9b4c..1b10180df195fe 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/DomainResolver.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/DomainResolver.java @@ -62,7 +62,7 @@ public void runAfterCatalogReady() { Map> resolvedIPsMap = Maps.newHashMap(); for (String domain : allDomains) { if (LOG.isDebugEnabled()) { - LOG.debug("begin to resolve domain: {}", domain); + LOG.debug("Begin to resolve domain: {}", domain); } Set resolvedIPs = Sets.newHashSet(); if (!resolveWithBNS(domain, resolvedIPs) 
&& !resolveWithDNS(domain, resolvedIPs)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java index baa1d6f2a14178..5995243ca4991a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java @@ -1476,7 +1476,7 @@ protected void getClusterIdAndRole() throws IOException { } Preconditions.checkState(helperNodes.size() == 1); - LOG.info("finished to get cluster id: {}, isElectable: {}, role: {} and node name: {}", + LOG.info("Finished getting cluster id: {}, isElectable: {}, role: {} and node name: {}", clusterId, isElectable, role.name(), nodeName); } @@ -1691,7 +1691,7 @@ private void transferToMaster() { // replay journals. -1 means replay all the journals larger than current journal id. replayJournal(-1); long replayEndTime = System.currentTimeMillis(); - LOG.info("finish replay in " + (replayEndTime - replayStartTime) + " msec"); + LOG.info("Finished replaying in " + (replayEndTime - replayStartTime) + " msec"); removeDroppedFrontends(removedFrontends); @@ -2088,7 +2088,7 @@ private void initLowerCaseTableNames() { LOG.error("Initialization of lower_case_table_names failed.", e); System.exit(-1); } - LOG.info("Finish initializing lower_case_table_names, value is {}", GlobalVariable.lowerCaseTableNames); + LOG.info("Finished initializing lower_case_table_names, value is {}", GlobalVariable.lowerCaseTableNames); } // After the cluster initialization is complete, 'lower_case_table_names' can not be modified during the cluster @@ -2269,7 +2269,7 @@ public long loadHeaderCOR1(DataInputStream dis, long checksum) throws IOExceptio // just read and skip it. 
dis.readBoolean(); - LOG.info("finished replay header from image"); + LOG.info("Finished replaying header from image"); return newChecksum; } @@ -2278,7 +2278,7 @@ public long loadMasterInfo(DataInputStream dis, long checksum) throws IOExceptio long newChecksum = checksum ^ masterInfo.getRpcPort(); newChecksum ^= masterInfo.getHttpPort(); - LOG.info("finished replay masterInfo from image"); + LOG.info("Finished replaying masterInfo from image"); return newChecksum; } @@ -2295,17 +2295,17 @@ public long loadFrontends(DataInputStream dis, long checksum) throws IOException for (int i = 0; i < size; i++) { removedFrontends.add(Text.readString(dis)); } - LOG.info("finished replay frontends from image"); + LOG.info("Finished replaying frontends from image"); return newChecksum; } public long loadBackends(DataInputStream dis, long checksum) throws IOException { - LOG.info("start loading backends from image"); + LOG.info("Start loading backends from image"); return systemInfo.loadBackends(dis, checksum); } public long loadDb(DataInputStream dis, long checksum) throws IOException, DdlException { - LOG.info("start loading db from image"); + LOG.info("Start loading db from image"); return getInternalCatalog().loadDb(dis, checksum); } @@ -2323,7 +2323,7 @@ public long loadExportJob(DataInputStream dis, long checksum) throws IOException exportMgr.unprotectAddJob(job); } } - LOG.info("finished replay exportJob from image"); + LOG.info("Finished replaying exportJob from image"); return newChecksum; } @@ -2333,7 +2333,7 @@ public long loadAlterJob(DataInputStream dis, long checksum) for (JobType type : JobType.values()) { newChecksum = loadAlterJob(dis, newChecksum, type); } - LOG.info("finished replay alterJob from image"); + LOG.info("Finished replaying alterJob from image"); return newChecksum; } @@ -2389,20 +2389,20 @@ public long loadAlterJob(DataInputStream dis, long checksum, JobType type) public long loadBackupHandler(DataInputStream dis, long checksum) throws IOException 
{ getBackupHandler().readFields(dis); getBackupHandler().setEnv(this); - LOG.info("finished replay backupHandler from image"); + LOG.info("Finished replaying backupHandler from image"); return checksum; } public long loadDeleteHandler(DataInputStream dis, long checksum) throws IOException { this.deleteHandler = DeleteHandler.read(dis); - LOG.info("finished replay deleteHandler from image"); + LOG.info("Finished replaying deleteHandler from image"); return checksum; } public long loadAuth(DataInputStream dis, long checksum) throws IOException { // CAN NOT use Auth.read(), cause this auth instance is already passed to DomainResolver auth.readFields(dis); - LOG.info("finished replay auth from image"); + LOG.info("Finished replaying auth from image"); return checksum; } @@ -2410,7 +2410,7 @@ public long loadTransactionState(DataInputStream dis, long checksum) throws IOEx int size = dis.readInt(); long newChecksum = checksum ^ size; globalTransactionMgr.readFields(dis); - LOG.info("finished replay transactionState from image"); + LOG.info("Finished replaying transactionState from image"); return newChecksum; } @@ -2423,87 +2423,87 @@ public long loadRecycleBin(DataInputStream dis, long checksum) throws IOExceptio for (Long dbId : recycleBin.getAllDbIds()) { globalTransactionMgr.addDatabaseTransactionMgr(dbId); } - LOG.info("finished replay recycleBin from image"); + LOG.info("Finished replaying recycleBin from image"); return checksum; } // global variable persistence public long loadGlobalVariable(DataInputStream in, long checksum) throws IOException, DdlException { VariableMgr.read(in); - LOG.info("finished replay globalVariable from image"); + LOG.info("Finished replaying globalVariable from image"); return checksum; } // load binlogs public long loadBinlogs(DataInputStream dis, long checksum) throws IOException { binlogManager.read(dis, checksum); - LOG.info("finished replay binlogMgr from image"); + LOG.info("Finished replaying binlogMgr from image"); return 
checksum; } public long loadColocateTableIndex(DataInputStream dis, long checksum) throws IOException { Env.getCurrentColocateIndex().readFields(dis); - LOG.info("finished replay colocateTableIndex from image"); + LOG.info("Finished replaying colocateTableIndex from image"); return checksum; } public long loadRoutineLoadJobs(DataInputStream dis, long checksum) throws IOException { Env.getCurrentEnv().getRoutineLoadManager().readFields(dis); - LOG.info("finished replay routineLoadJobs from image"); + LOG.info("Finished replaying routineLoadJobs from image"); return checksum; } public long loadLoadJobsV2(DataInputStream in, long checksum) throws IOException { loadManager.readFields(in); - LOG.info("finished replay loadJobsV2 from image"); + LOG.info("Finished replaying loadJobsV2 from image"); return checksum; } public long loadAsyncJobManager(DataInputStream in, long checksum) throws IOException { jobManager.readFields(in); - LOG.info("finished replay asyncJobMgr from image"); + LOG.info("Finished replaying asyncJobMgr from image"); return checksum; } public long saveAsyncJobManager(CountingDataOutputStream out, long checksum) throws IOException { jobManager.write(out); - LOG.info("finished save analysisMgr to image"); + LOG.info("Finished saving analysisMgr to image"); return checksum; } public long loadResources(DataInputStream in, long checksum) throws IOException { resourceMgr = ResourceMgr.read(in); - LOG.info("finished replay resources from image"); + LOG.info("Finished replaying resources from image"); return checksum; } public long loadWorkloadGroups(DataInputStream in, long checksum) throws IOException { workloadGroupMgr = WorkloadGroupMgr.read(in); - LOG.info("finished replay workload groups from image"); + LOG.info("Finished replaying workload groups from image"); return checksum; } public long loadWorkloadSchedPolicy(DataInputStream in, long checksum) throws IOException { workloadSchedPolicyMgr = WorkloadSchedPolicyMgr.read(in); - LOG.info("finished 
replay workload sched policy from image"); + LOG.info("Finished replaying workload sched policy from image"); return checksum; } public long loadPlsqlProcedure(DataInputStream in, long checksum) throws IOException { Text.readString(in); - LOG.info("finished replay plsql procedure from image"); + LOG.info("Finished replaying plsql procedure from image"); return checksum; } public long loadSmallFiles(DataInputStream in, long checksum) throws IOException { smallFileMgr.readFields(in); - LOG.info("finished replay smallFiles from image"); + LOG.info("Finished replaying smallFiles from image"); return checksum; } public long loadSqlBlockRule(DataInputStream in, long checksum) throws IOException { sqlBlockRuleMgr = SqlBlockRuleMgr.read(in); - LOG.info("finished replay sqlBlockRule from image"); + LOG.info("Finished replaying sqlBlockRule from image"); return checksum; } @@ -2523,13 +2523,13 @@ public long loadAuthenticationIntegrations(DataInputStream in, long checksum) th **/ public long loadPolicy(DataInputStream in, long checksum) throws IOException { policyMgr = PolicyMgr.read(in); - LOG.info("finished replay policy from image"); + LOG.info("Finished replaying policy from image"); return checksum; } public long loadIndexPolicy(DataInputStream in, long checksum) throws IOException { indexPolicyMgr = IndexPolicyMgr.read(in); - LOG.info("finished replay index policy from image"); + LOG.info("Finished replaying index policy from image"); return checksum; } @@ -2537,14 +2537,14 @@ public long loadIndexPolicy(DataInputStream in, long checksum) throws IOExceptio * Load catalogs through file. **/ public long loadCatalog(DataInputStream in, long checksum) throws IOException { - LOG.info("start loading catalog from image"); + LOG.info("Start loading catalog from image"); CatalogMgr mgr = CatalogMgr.read(in); // When enable the multi catalog in the first time, the "mgr" will be a null value. // So ignore it to use default catalog manager. 
if (mgr != null) { this.catalogMgr = mgr; } - LOG.info("finished replay catalog from image"); + LOG.info("Finished replaying catalog from image"); return checksum; } @@ -2553,43 +2553,43 @@ public long loadCatalog(DataInputStream in, long checksum) throws IOException { **/ public long loadGlobalFunction(DataInputStream in, long checksum) throws IOException { this.globalFunctionMgr = GlobalFunctionMgr.read(in); - LOG.info("finished replay global function from image"); + LOG.info("Finished replaying global function from image"); return checksum; } public long loadAnalysisManager(DataInputStream in, long checksum) throws IOException { this.analysisManager = AnalysisManager.readFields(in); - LOG.info("finished replay AnalysisMgr from image"); + LOG.info("Finished replaying AnalysisMgr from image"); return checksum; } public long loadInsertOverwrite(DataInputStream in, long checksum) throws IOException { this.insertOverwriteManager = InsertOverwriteManager.read(in); - LOG.info("finished replay iot from image"); + LOG.info("Finished replaying iot from image"); return checksum; } public long loadKeyManagerStore(DataInputStream in, long checksum) throws IOException { this.keyManagerStore = KeyManagerStore.read(in); - LOG.info("finished replay KeyManagerStore from image"); + LOG.info("Finished replaying KeyManagerStore from image"); return checksum; } public long saveInsertOverwrite(CountingDataOutputStream out, long checksum) throws IOException { this.insertOverwriteManager.write(out); - LOG.info("finished save iot to image"); + LOG.info("Finished saving iot to image"); return checksum; } public long loadDictionaryManager(DataInputStream in, long checksum) throws IOException { this.dictionaryManager = DictionaryManager.read(in); - LOG.info("finished replay dictMgr from image"); + LOG.info("Finished replaying dictMgr from image"); return checksum; } public long saveDictionaryManager(CountingDataOutputStream out, long checksum) throws IOException { 
this.dictionaryManager.write(out); - LOG.info("finished save dictMgr to image"); + LOG.info("Finished saving dictMgr to image"); return checksum; } @@ -2879,19 +2879,19 @@ public long saveAnalysisMgr(CountingDataOutputStream dos, long checksum) throws public long saveKeyManagerStore(CountingDataOutputStream out, long checksum) throws IOException { this.keyManagerStore.write(out); - LOG.info("finished save KeyManager to image"); + LOG.info("Finished saving KeyManager to image"); return checksum; } public long saveConstraintManager(CountingDataOutputStream out, long checksum) throws IOException { constraintManager.write(out); - LOG.info("finished save ConstraintManager to image"); + LOG.info("Finished saving ConstraintManager to image"); return checksum; } public long loadConstraintManager(DataInputStream in, long checksum) throws IOException { this.constraintManager = ConstraintManager.read(in); - LOG.info("finished replay ConstraintManager from image"); + LOG.info("Finished replaying ConstraintManager from image"); return checksum; } @@ -3084,7 +3084,7 @@ protected synchronized void runOneCycle() { System.exit(-1); } Preconditions.checkNotNull(newType); - LOG.info("begin to transfer FE type from {} to {}", feType, newType); + LOG.info("Begin to transfer FE type from {} to {}", feType, newType); if (feType == newType) { return; } @@ -3171,7 +3171,7 @@ protected synchronized void runOneCycle() { } // end switch formerFeType feType = newType; - LOG.info("finished to transfer FE type to {}", feType); + LOG.info("Finished transferring FE type to {}", feType); } } // end runOneCycle }; @@ -4882,7 +4882,7 @@ public void replayAddFrontend(Frontend fe) { * 1. first, add a FE as OBSERVER. * 2. This OBSERVER is restarted with ROLE and VERSION file being DELETED. * In this case, this OBSERVER will be started as a FOLLOWER, and add itself to the frontends. - * 3. this "FOLLOWER" begin to load image or replay journal, + * 3. 
this "FOLLOWER" Begin to load image or replay journal, * then find the origin OBSERVER in image or journal. * This will cause UNDEFINED behavior, so it is better to exit and fix it manually. */ @@ -5648,7 +5648,7 @@ public void modifyTableColocate(Database db, OlapTable table, String assignedGro TablePropertyInfo info = new TablePropertyInfo(db.getId(), table.getId(), groupId, properties); editLog.logModifyTableColocate(info); } - LOG.info("finished modify table's colocation property. table: {}, is replay: {}", table.getName(), isReplay); + LOG.info("Finished modifying table's colocation property. table: {}, is replay: {}", table.getName(), isReplay); } public void replayModifyTableColocate(TablePropertyInfo info) throws MetaNotFoundException { @@ -6449,12 +6449,12 @@ public long loadBrokers(DataInputStream dis, long checksum) throws IOException, } brokerMgr.replayAddBrokers(brokerName, addrs); } - LOG.info("finished replay brokerMgr from image"); + LOG.info("Finished replaying brokerMgr from image"); return checksum; } public String dumpImage() { - LOG.info("begin to dump meta data"); + LOG.info("Begin to dump meta data"); String dumpFilePath; List databases = Lists.newArrayList(); List> tableLists = Lists.newArrayList(); @@ -6486,7 +6486,7 @@ public String dumpImage() { } dumpFilePath = dumpFile.getAbsolutePath(); try { - LOG.info("begin to dump {}", dumpFilePath); + LOG.info("Begin to dump {}", dumpFilePath); saveImage(dumpFile, journalId); } catch (IOException e) { LOG.error("failed to dump image to {}", dumpFilePath, e); @@ -6501,7 +6501,7 @@ public String dumpImage() { unlock(); } - LOG.info("finished dumping image to {}", dumpFilePath); + LOG.info("Finished dumping image to {}", dumpFilePath); return dumpFilePath; } @@ -6739,7 +6739,7 @@ public void convertDistributionType(Database db, OlapTable tbl) throws DdlExcept } TableInfo tableInfo = TableInfo.createForModifyDistribution(db.getId(), tbl.getId()); editLog.logModifyDistributionType(tableInfo); - 
LOG.info("finished to modify distribution type of table from hash to random : " + tbl.getName()); + LOG.info("Finished modifying distribution type of table from hash to random: " + tbl.getName()); } finally { tbl.writeUnlock(); } @@ -6816,7 +6816,7 @@ public void replaceTempPartition(Database db, OlapTable olapTable, ReplacePartit versionTime, isForceDropOld); editLog.logReplaceTempPartition(info); - LOG.info("finished to replace partitions {} with temp partitions {} from table: {}", + LOG.info("Finished replacing partitions {} with temp partitions {} from table: {}", replacePartitionOp.getPartitionNames(), replacePartitionOp.getTempPartitionNames(), olapTable.getName()); } @@ -6860,7 +6860,7 @@ public long savePlugins(CountingDataOutputStream dos, long checksum) throws IOEx public long loadPlugins(DataInputStream dis, long checksum) throws IOException { Env.getCurrentPluginMgr().readFields(dis); - LOG.info("finished replay plugins from image"); + LOG.info("Finished replaying plugins from image"); return checksum; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/LocalTabletInvertedIndex.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/LocalTabletInvertedIndex.java index 27d1b6ba6755e1..3b8838e55557ca 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/LocalTabletInvertedIndex.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/LocalTabletInvertedIndex.java @@ -116,7 +116,7 @@ public void tabletReport(long backendId, Map backendTablets, try { if (LOG.isDebugEnabled()) { - LOG.debug("begin to do tablet diff with backend[{}]. num: {}", backendId, backendTablets.size()); + LOG.debug("Begin to do tablet diff with backend[{}]. 
num: {}", backendId, backendTablets.size()); } Map replicaMetaWithBackend = backingReplicaMetaTable.get(backendId); @@ -478,7 +478,7 @@ private void logTabletReportSummary(long backendId, long feTabletNum, long toPublishTransactionsPartitions = transactionsToPublish.values().stream() .mapToLong(m -> m.values().size()).sum(); - LOG.info("finished to do tablet diff with backend[{}]. fe tablet num: {}, backend tablet num: {}. " + LOG.info("Finished doing tablet diff with backend[{}]. fe tablet num: {}, backend tablet num: {}. " + "sync: {}, metaDel: {}, foundInMeta: {}, migration: {}, " + "backend partition num: {}, backend need update: {}, " + "found invalid transactions {}(partitions: {}), " diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletStatMgr.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletStatMgr.java index 37b198652be4ff..6d7ecbe4580c30 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletStatMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletStatMgr.java @@ -112,7 +112,7 @@ protected void runAfterCatalogReady() { waitForTabletStatUpdate(); if (LOG.isDebugEnabled()) { - LOG.debug("finished to get tablet stat of all backends. cost: {} ms", + LOG.debug("Finished getting tablet stats of all backends. cost: {} ms", (System.currentTimeMillis() - start)); } @@ -266,7 +266,7 @@ protected void runAfterCatalogReady() { tableTotalRemoteIndexSize, tableTotalRemoteSegmentSize)); if (LOG.isDebugEnabled()) { - LOG.debug("finished to set row num for table: {} in database: {}", + LOG.debug("Finished setting row num for table: {} in database: {}", table.getName(), db.getFullName()); } } finally { @@ -313,7 +313,7 @@ protected void runAfterCatalogReady() { + ", avg table byte size=" + avgTableSize); } - LOG.info("finished to update index row num of all databases. cost: {} ms", + LOG.info("Finished updating index row num of all databases. 
cost: {} ms", (System.currentTimeMillis() - start)); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java index 7dc0a9125dddf7..d6bb4d8ca883d3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java @@ -591,7 +591,7 @@ private void matchGroups() { } // end for groups long cost = System.currentTimeMillis() - start; - LOG.info("finished to check tablets. unhealth/total/added/in_sched/not_ready/exceed_limit: {}/{}/{}/{}/{}/{}, " + LOG.info("Finished checking tablets. unhealth/total/added/in_sched/not_ready/exceed_limit: {}/{}/{}/{}/{}/{}, " + "cost: {} ms", counter.unhealthyTabletNum, counter.totalTabletNum, counter.addToSchedulerTabletNum, counter.tabletInScheduler, counter.tabletNotReady, counter.tabletExceedLimit, cost); diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java index e652fc68d460ba..103fb0f8527ad8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java @@ -348,7 +348,7 @@ private void checkTablets() { tabletCountByStatus.get("not_ready").set(counter.tabletNotReady); tabletCountByStatus.get("exceed_limit").set(counter.tabletExceedLimit); - LOG.info("finished to check tablets. unhealth/total/added/in_sched/not_ready/exceed_limit: {}/{}/{}/{}/{}/{}," + LOG.info("Finished checking tablets. 
unhealth/total/added/in_sched/not_ready/exceed_limit: {}/{}/{}/{}/{}/{}," + "cost: {} ms", counter.unhealthyTabletNum, counter.totalTabletNum, counter.addToSchedulerTabletNum, counter.tabletInScheduler, counter.tabletNotReady, counter.tabletExceedLimit, cost); diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java index a15830efe0151b..ad2050bb41fa32 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java @@ -363,7 +363,7 @@ public void runAfterCatalogReady() { stat.counterTabletScheduleRound.incrementAndGet(); // Add a log message to indicate that this thread is operating normally. - LOG.info("finished to tablet scheduler. cost: {} ms", + LOG.info("Finished tablet scheduling. cost: {} ms", System.currentTimeMillis() - start); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManager.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManager.java index 500f65de153df4..2acbc0a11b63cd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManager.java @@ -720,7 +720,7 @@ public Map> warmUpNewClusterByTable(long jobId, String dstClu Map> beToWarmUpTablets = new HashMap<>(); Long totalFileCache = getFileCacheCapacity(dstClusterName); Long warmUpTotalFileCache = 0L; - LOG.info("Start warm up job {}, cluster {}, total cache size: {}", + LOG.info("Start warming up job {}, cluster {}, total cache size: {}", jobId, dstClusterName, totalFileCache); List backends = getBackendsFromCluster(dstClusterName); LOG.info("Got {} backends for cluster {}", backends.size(), dstClusterName); @@ -835,7 +835,7 @@ public long createJob(WarmUpClusterCommand stmt) throws AnalysisException { addCloudWarmUpJob(warmUpJob); 
Env.getCurrentEnv().getEditLog().logModifyCloudWarmUpJob(warmUpJob); - LOG.info("finished to create cloud warm up job: {}", warmUpJob.getJobId()); + LOG.info("Finished creating cloud warm up job: {}", warmUpJob.getJobId()); return jobId; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/backup/CloudRestoreJob.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/backup/CloudRestoreJob.java index 980e75ee2b58e8..e556773d0f4f3b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/backup/CloudRestoreJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/backup/CloudRestoreJob.java @@ -233,7 +233,7 @@ public void waitingAllReplicasCreated() { return; } if (LOG.isDebugEnabled()) { - LOG.debug("finished to create all restored replicas. {}", this); + LOG.debug("Finished creating all restored replicas. {}", this); } allReplicasCreated(); } @@ -242,12 +242,12 @@ public void waitingAllSnapshotsFinished() { snapshotFinishedTime = System.currentTimeMillis(); state = RestoreJobState.DOWNLOAD; env.getEditLog().logRestoreJob(this); - LOG.info("finished making snapshots. {}", this); + LOG.info("Finished making snapshots. {}", this); } @Override protected void prepareAndSendSnapshotTaskForOlapTable(Database db) { - LOG.info("begin to make snapshot. {} when restore content is ALL", this); + LOG.info("Begin to make snapshot. {} when restore content is ALL", this); unfinishedSignatureToId.clear(); taskProgress.clear(); taskErrMsg.clear(); @@ -281,7 +281,7 @@ protected void prepareAndSendSnapshotTaskForOlapTable(Database db) { } finally { db.readUnlock(); } - LOG.info("finished to send snapshot tasks, num: {}. {}", 0, this); + LOG.info("Finished sending snapshot tasks, num: {}. 
{}", 0, this); } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudClusterChecker.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudClusterChecker.java index 338d619604f372..96f00fa2758229 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudClusterChecker.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudClusterChecker.java @@ -96,7 +96,7 @@ private void checkToAddCluster(Map remoteClusterIdToPB, Set { if (LOG.isDebugEnabled()) { - LOG.debug("begin to add clusterId: {}", addId); + LOG.debug("Begin to add clusterId: {}", addId); } // Attach tag to BEs String clusterName = remoteClusterIdToPB.get(addId).getClusterName(); @@ -144,7 +144,7 @@ private void checkToDelCluster(Map remoteClusterIdToPB, Set { if (LOG.isDebugEnabled()) { - LOG.debug("begin to drop clusterId: {}", delId); + LOG.debug("Begin to drop clusterId: {}", delId); } List toDel = new ArrayList<>(finalClusterIdToBackend.getOrDefault(delId, new ArrayList<>())); @@ -385,7 +385,7 @@ protected void runAfterCatalogReady() { private void checkFeNodesMapValid() { if (LOG.isDebugEnabled()) { - LOG.debug("begin checkFeNodesMapValid"); + LOG.debug("Begin to checkfenodesmapvalid"); } Map> clusterIdToBackend = cloudSystemInfoService.getCloudClusterIdToBackend(false); Set clusterIds = new HashSet<>(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudInstanceStatusChecker.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudInstanceStatusChecker.java index 90c8ea42573504..7bbc22159aa4e3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudInstanceStatusChecker.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudInstanceStatusChecker.java @@ -69,7 +69,7 @@ protected void runAfterCatalogReady() { syncStorageVault(instance); processVirtualClusters(instance.getClustersList()); // Add a log message to indicate that this thread is operating normally. 
- LOG.info("finished to cloud instance checker. cost: {} ms", + LOG.info("Finished cloud instance checking. cost: {} ms", System.currentTimeMillis() - start); } catch (Exception e) { LOG.warn("get instance from ms exception", e); diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudTabletRebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudTabletRebalancer.java index d95b90f899306c..84b100504184fd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudTabletRebalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudTabletRebalancer.java @@ -542,7 +542,7 @@ protected void runAfterCatalogReady() { sleepSeconds = 1L; } setInterval(sleepSeconds * 1000L); - LOG.info("finished to rebalancer. cost: {} ms, rebalancer sche interval {} s", + LOG.info("Finished rebalancing. cost: {} ms, rebalancer schedule interval {} s", (System.currentTimeMillis() - start), sleepSeconds); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudUpgradeMgr.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudUpgradeMgr.java index 0a466e73f5f4d7..fe25921457b583 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudUpgradeMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudUpgradeMgr.java @@ -150,6 +150,6 @@ public void setBeStateInactive(long beId) { } be.setActive(false); /* now user can get BE inactive status from `show backends;` */ Env.getCurrentEnv().getEditLog().logModifyBackend(be); - LOG.info("finished to modify backend {} ", be); + LOG.info("Finished modifying backend {} ", be); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/load/CloudBrokerLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/load/CloudBrokerLoadJob.java index cd5c8dd1b0c52f..de11930f2115d3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/load/CloudBrokerLoadJob.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/cloud/load/CloudBrokerLoadJob.java @@ -319,7 +319,7 @@ protected void unprotectedExecuteRetry(FailMsg failMsg) { try { LOG.debug(new LogBuilder(LogKey.LOAD_JOB, id) .add("label", label) - .add("msg", "begin to abort txn") + .add("msg", "Begin to abort txn") .build()); Env.getCurrentGlobalTransactionMgr().abortTransaction(dbId, label, failMsg.getMsg()); } catch (UserException e) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/master/CloudReportHandler.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/master/CloudReportHandler.java index c25574b81131b1..c5b9cd8125d9e6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/master/CloudReportHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/master/CloudReportHandler.java @@ -57,7 +57,7 @@ public void tabletReport(long backendId, Map backendTablets, deleteFromBackend(backendId, tabletIdsNeedDrop); Backend be = Env.getCurrentSystemInfo().getBackend(backendId); - LOG.info("finished to handle task report from backend {}-{}, " + LOG.info("Finished handling task report from backend {}-{}, " + "diff task num: {}, cost: {} ms.", backendId, be != null ? 
be.getHost() : "", tabletIdsNeedDrop.size(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/rpc/MetaServiceProxy.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/rpc/MetaServiceProxy.java index e28a1d40f0516d..7b5f0a044e6b05 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/rpc/MetaServiceProxy.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/rpc/MetaServiceProxy.java @@ -124,7 +124,7 @@ public Cloud.GetInstanceResponse getInstance(Cloud.GetInstanceRequest request) } public void removeProxy(String address) { - LOG.warn("begin to remove proxy: {}", address); + LOG.warn("Begin to remove proxy: {}", address); MetaServiceClient service; lock.lock(); try { diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/system/CloudSystemInfoService.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/system/CloudSystemInfoService.java index 34865f06e61432..59eb653d47252c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/system/CloudSystemInfoService.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/system/CloudSystemInfoService.java @@ -1078,7 +1078,7 @@ public String addCloudCluster(final String clusterName, final String userName) t wlock.unlock(); } - LOG.info("begin to get cloud cluster from remote, clusterName={}, userName={}", clusterName, userName); + LOG.info("Begin to get cloud cluster from remote, clusterName={}, userName={}", clusterName, userName); // Get cloud cluster info from resource manager Cloud.GetClusterResponse response = getCloudCluster(clusterName, "", userName); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java index 4347003d000208..80fb6d7ae8cc7b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java @@ -182,7 +182,7 @@ private void downloadAndAddFile(long dbId, String 
catalog, String fileName, Stri Env.getCurrentEnv().getEditLog().logCreateSmallFile(smallFile); - LOG.info("finished to add file {} from url {}. current file number: {}", fileName, downloadUrl, + LOG.info("Finished adding file {} from url {}. current file number: {}", fileName, downloadUrl, idToFiles.size()); } } @@ -218,7 +218,7 @@ public void removeFile(long dbId, String catalog, String fileName, boolean isRep Env.getCurrentEnv().getEditLog().logDropSmallFile(smallFile); } - LOG.info("finished to remove file {}. current file number: {}. is replay: {}", + LOG.info("Finished removing file {}. current file number: {}. is replay: {}", fileName, idToFiles.size(), isReplay); } else { throw new DdlException("No such file: " + fileName); diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java index d2a8d0cd555732..8dc8467dd3fb8c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java @@ -304,7 +304,7 @@ public void dropCatalog(String catalogName, boolean ifExists) throws UserExcepti if (removedCatalog == null) { return; } - LOG.info("finished to drop catalog {}:{}", removedCatalog.catalogName, removedCatalog.catalogId); + LOG.info("Finished dropping catalog {}:{}", removedCatalog.catalogName, removedCatalog.catalogId); } /** @@ -550,7 +550,7 @@ private void createCatalogInternal(CatalogIf catalog, boolean isReplay) throws D Env.getCurrentEnv().getRefreshManager().addToRefreshMap(catalogId, sec); } addCatalog(catalog); - LOG.info("finished to create catalog {}:{}, is replay: {}", catalog.getName(), catalog.getId(), isReplay); + LOG.info("Finished creating catalog {}:{}, is replay: {}", catalog.getName(), catalog.getId(), isReplay); } finally { writeUnlock(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java index 7d188ff42d70a0..d83b4c795fa3d7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java @@ -351,7 +351,7 @@ public boolean tableExistInLocal(String dbName, String tblName) { */ public final synchronized void makeSureInitialized() { if (LOG.isDebugEnabled()) { - LOG.debug("start to init catalog {}:{}", name, id); + LOG.debug("Start initializing catalog {}:{}", name, id); } if (isInitializing) { if (LOG.isDebugEnabled()) { @@ -379,7 +379,7 @@ public final synchronized void makeSureInitialized() { protected final void initLocalObjects() { if (!objectCreated) { if (LOG.isDebugEnabled()) { - LOG.debug("start to init local objects of catalog {}:{}", getName(), id, new Exception()); + LOG.debug("Start initializing local objects of catalog {}:{}", getName(), id, new Exception()); } initLocalObjectsImpl(); objectCreated = true; @@ -1032,7 +1032,7 @@ public boolean createTable(CreateTableInfo createTableInfo) throws UserException createTableInfo.getDbName(), createTableInfo.getTableName()); Env.getCurrentEnv().getEditLog().logCreateTable(info); - LOG.info("finished to create table {}.{}.{}", getName(), createTableInfo.getDbName(), + LOG.info("Finished creating table {}.{}.{}", getName(), createTableInfo.getDbName(), createTableInfo.getTableName()); } return res; diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java index a3c3dbad86c4f4..5e175d82a03e78 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java @@ -463,7 +463,7 @@ public void replayCreateDb(Database db, String newDbName) { @Override public void dropDb(String dbName, boolean ifExists, boolean force) throws DdlException
{ - LOG.info("begin drop database[{}], is force : {}", dbName, force); + LOG.info("Begin to drop database[{}], is force : {}", dbName, force); // 1. check if database exists if (!tryLock(false)) { @@ -539,7 +539,7 @@ public void dropDb(String dbName, boolean ifExists, boolean force) throws DdlExc unlock(); } - LOG.info("finish drop database[{}], is force : {}", dbName, force); + LOG.info("Finished dropping database[{}], is force : {}", dbName, force); } public void unprotectDropDb(Database db, boolean isForeDrop, boolean isReplay, long recycleTime) @@ -851,7 +851,7 @@ public void dropTable(String dbName, String tableName, boolean isView, boolean i boolean ifExists, boolean mustTemporary, boolean force) throws DdlException { Map costTimes = new TreeMap(); StopWatch watch = StopWatch.createStarted(); - LOG.info("begin to drop table: {} from db: {}, is force: {}", tableName, dbName, force); + LOG.info("Begin to drop table: {} from db: {}, is force: {}", tableName, dbName, force); // check database Database db = getDbOrDdlException(dbName); @@ -936,7 +936,7 @@ public void dropTable(String dbName, String tableName, boolean isView, boolean i } watch.stop(); costTimes.put("6:total", watch.getTime()); - LOG.info("finished dropping table: {} from db: {}, is view: {}, is force: {}, cost: {}", + LOG.info("Finished dropping table: {} from db: {}, is view: {}, is force: {}, cost: {}", tableName, dbName, isView, force, costTimes); } @@ -1022,7 +1022,7 @@ public boolean unprotectDropTable(Database db, Table table, boolean isForceDrop, StopWatch watch = StopWatch.createStarted(); Env.getCurrentRecycleBin().recycleTable(db.getId(), table, isReplay, isForceDrop, recycleTime); watch.stop(); - LOG.info("finished dropping table[{}] in db[{}] recycleTable cost: {}ms", + LOG.info("Finished dropping table[{}] in db[{}] recycleTable cost: {}ms", table.getName(), db.getFullName(), watch.getTime()); return true; } @@ -1672,7 +1672,7 @@ public void addPartition(Database db, String tableName, 
AddPartitionOp addPartit // create partition outside db lock if (LOG.isDebugEnabled()) { - LOG.debug("start creating table[{}] partition[{}] (owner), temp={}", tableName, partitionName, + LOG.debug("Start creating table[{}] partition[{}] (owner), temp={}", tableName, partitionName, isTempPartition); } DataProperty dataProperty = singlePartitionDesc.getPartitionDataProperty(); @@ -2260,7 +2260,7 @@ public void checkAvailableCapacity(Database db) throws DdlException { private boolean createOlapTable(Database db, CreateTableInfo createTableInfo) throws UserException { String tableName = createTableInfo.getTableName(); if (LOG.isDebugEnabled()) { - LOG.debug("begin create olap table: {}", tableName); + LOG.debug("Begin to create olap table: {}", tableName); } String tableShowName = tableName; if (createTableInfo.isTemp()) { @@ -3481,7 +3481,7 @@ public void truncateTable(String dbName, String tableName, PartitionNamesInfo pa // if table currently has no partitions, this sql like empty command and do nothing, should return directly. 
// but if truncate whole table, the temporary partitions also need drop if (origPartitions.isEmpty() && (!truncateEntireTable || olapTable.getAllTempPartitions().isEmpty())) { - LOG.info("finished to truncate table {}.{}, no partition contains data, do nothing", + LOG.info("Finished truncating table {}.{}, no partition contains data, do nothing", dbName, tableName); return; } @@ -3645,7 +3645,7 @@ public void truncateTable(String dbName, String tableName, PartitionNamesInfo pa } Env.getCurrentEnv().getAnalysisManager().dropStats(olapTable, partitionNamesInfo); - LOG.info("finished to truncate table {}.{}, partitions: {}", dbName, tableName, partitionNamesInfo); + LOG.info("Finished truncating table {}.{}, partitions: {}", dbName, tableName, partitionNamesInfo); } private List truncateTableInternal(OlapTable olapTable, List newPartitions, @@ -3819,7 +3819,7 @@ public long loadDb(DataInputStream dis, long checksum) throws IOException, DdlEx } // ATTN: this should be done after load Db, and before loadAlterJob recreateTabletInvertIndex(); - LOG.info("finished replay databases from image"); + LOG.info("Finished replaying databases from image"); return newChecksum; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java index 7fa454d7813c5a..0981626199727d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java @@ -246,7 +246,7 @@ public void executeStmt(String origStmt) { stmt = conn.createStatement(); int effectedRows = stmt.executeUpdate(origStmt); if (LOG.isDebugEnabled()) { - LOG.debug("finished to execute dml stmt: {}, effected rows: {}", origStmt, effectedRows); + LOG.debug("Finished executing DML stmt: {}, affected rows: {}", origStmt, effectedRows); } } catch (SQLException e) { throw new JdbcClientException("Failed to 
execute stmt. error: " + e.getMessage(), e); diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/kafka/KafkaUtil.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/kafka/KafkaUtil.java index a097f052aa9dea..cc552aab67478a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/kafka/KafkaUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/kafka/KafkaUtil.java @@ -76,7 +76,7 @@ public static List> getOffsetsForTimes(String brokerList, St Map convertedCustomProperties, List> timestampOffsets) throws LoadException { if (LOG.isDebugEnabled()) { - LOG.debug("begin to get offsets for times of topic: {}, {}", topic, timestampOffsets); + LOG.debug("Begin to get offsets for times of topic: {}, {}", topic, timestampOffsets); } try { InternalService.PKafkaMetaProxyRequest.Builder metaRequestBuilder = @@ -108,7 +108,7 @@ public static List> getOffsetsForTimes(String brokerList, St partitionOffsets.add(Pair.of(pair.getKey(), pair.getVal())); } if (LOG.isDebugEnabled()) { - LOG.debug("finish to get offsets for times of topic: {}, {}", topic, partitionOffsets); + LOG.debug("Finished getting offsets for times of topic: {}, {}", topic, partitionOffsets); } return partitionOffsets; } catch (Exception e) { @@ -122,7 +122,7 @@ public static List> getLatestOffsets(long jobId, UUID taskId Map convertedCustomProperties, List partitionIds) throws LoadException { if (LOG.isDebugEnabled()) { - LOG.debug("begin to get latest offsets for partitions {} in topic: {}, task {}, job {}", + LOG.debug("Begin to get latest offsets for partitions {} in topic: {}, task {}, job {}", partitionIds, topic, taskId, jobId); } try { @@ -153,7 +153,7 @@ public static List> getLatestOffsets(long jobId, UUID taskId partitionOffsets.add(Pair.of(pair.getKey(), pair.getVal())); } if (LOG.isDebugEnabled()) { - LOG.debug("finish to get latest offsets for partitions {} in topic: {}, task {}, job {}", + LOG.debug("Finished getting latest offsets for partitions {} in 
topic: {}, task {}, job {}", partitionOffsets, topic, taskId, jobId); } return partitionOffsets; @@ -213,7 +213,7 @@ public static List> getRealOffsets(String brokerList, String partitionOffsets.add(Pair.of(pair.getKey(), pair.getVal())); } realOffsets.addAll(partitionOffsets); - LOG.info("finish to get real offsets for partitions {} in topic: {}", realOffsets, topic); + LOG.info("Finished getting real offsets for partitions {} in topic: {}", realOffsets, topic); return realOffsets; } catch (Exception e) { LOG.warn("failed to get real offsets.", e); diff --git a/fe/fe-core/src/main/java/org/apache/doris/deploy/impl/LocalFileDeployManager.java b/fe/fe-core/src/main/java/org/apache/doris/deploy/impl/LocalFileDeployManager.java index 72a3fd773d80c5..e1eaf9ceb0b747 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/deploy/impl/LocalFileDeployManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/deploy/impl/LocalFileDeployManager.java @@ -82,7 +82,7 @@ protected void initEnvVariables(String envElectableFeServiceGroup, String envObs public List getGroupHostInfos(NodeType nodeType) { String groupName = nodeTypeAttrMap.get(nodeType).getServiceName(); List result = Lists.newArrayList(); - LOG.info("begin to get group: {} from file: {}", groupName, clusterInfoFile); + LOG.info("Begin to get group: {} from file: {}", groupName, clusterInfoFile); FileChannel channel = null; FileLock lock = null; diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/operations/BrokerFileOperations.java b/fe/fe-core/src/main/java/org/apache/doris/fs/operations/BrokerFileOperations.java index 68ec50c18af5dd..6b6dafcf685ba0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/fs/operations/BrokerFileOperations.java +++ b/fe/fe-core/src/main/java/org/apache/doris/fs/operations/BrokerFileOperations.java @@ -97,7 +97,7 @@ public Status closeReader(OpParams opParams) { + " for fd: " + fd); } - LOG.info("finished to close reader. fd: {}.", fd); + LOG.info("Finished closing reader. 
fd: {}.", fd); } catch (TException e) { return new Status(Status.ErrCode.BAD_CONNECTION, "failed to close reader on broker " @@ -126,7 +126,7 @@ public Status openWriter(OpParams desc) { fd.setHigh(rep.getFd().getHigh()); fd.setLow(rep.getFd().getLow()); - LOG.info("finished to open writer. fd: {}. directly upload to remote path {}.", fd, remoteFile); + LOG.info("Finished opening writer. fd: {}. directly upload to remote path {}.", fd, remoteFile); } catch (TException e) { return new Status(Status.ErrCode.BAD_CONNECTION, "failed to open writer on broker " @@ -150,7 +150,7 @@ public Status closeWriter(OpParams desc) { + " for fd: " + fd); } - LOG.info("finished to close writer. fd: {}.", fd); + LOG.info("Finished closing writer. fd: {}.", fd); } catch (TException e) { return new Status(Status.ErrCode.BAD_CONNECTION, "failed to close writer on broker " diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/operations/HDFSFileOperations.java b/fe/fe-core/src/main/java/org/apache/doris/fs/operations/HDFSFileOperations.java index 44563317bc27bb..3854bb64c5ddc4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/fs/operations/HDFSFileOperations.java +++ b/fe/fe-core/src/main/java/org/apache/doris/fs/operations/HDFSFileOperations.java @@ -124,7 +124,7 @@ public Status closeWriter(OpParams opParams) { try { fsDataOutputStream.flush(); fsDataOutputStream.close(); - LOG.info("finished to close writer"); + LOG.info("Finished closing writer"); } catch (IOException e) { LOG.error("errors while close file output stream", e); return new Status(Status.ErrCode.COMMON_ERROR, "failed to close writer, msg:" + e.getMessage()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/BrokerFileSystem.java b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/BrokerFileSystem.java index 224381f10d1de4..4c9d25c3d49d8f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/BrokerFileSystem.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/BrokerFileSystem.java @@ -175,7 +175,7 @@ public Status downloadWithFileSize(String remoteFilePath, String localFilePath, if (!opStatus.ok()) { return opStatus; } - LOG.info("finished to open reader. fd: {}. download {} to {}.", + LOG.info("Finished opening reader. fd: {}. download {} to {}.", fd, remoteFilePath, localFilePath); Preconditions.checkNotNull(fd); // 3. delete local file if exist @@ -313,7 +313,7 @@ public Status downloadWithFileSize(String remoteFilePath, String localFilePath, } } - LOG.info("finished to download from {} to {} with size: {}. cost {} ms", + LOG.info("Finished downloading from {} to {} with size: {}. cost {} ms", remoteFilePath, localFilePath, fileSize, (System.currentTimeMillis() - start)); return status; } @@ -481,7 +481,7 @@ public Status upload(String localPath, String remotePath) { } if (status.ok()) { - LOG.info("finished to upload {} to remote path {}. cost: {} ms", + LOG.info("Finished uploading {} to remote path {}. cost: {} ms", localPath, remotePath, (System.currentTimeMillis() - start)); } return status; @@ -522,7 +522,7 @@ public Status rename(String origFilePath, String destFilePath) { } } - LOG.info("finished to rename {} to {}. cost: {} ms", + LOG.info("Finished renaming {} to {}. cost: {} ms", origFilePath, destFilePath, (System.currentTimeMillis() - start)); return Status.OK; } @@ -549,7 +549,7 @@ public Status delete(String remotePath) { + ", broker: " + BrokerUtil.printBroker(name, address)); } - LOG.info("finished to delete remote path {}.", remotePath); + LOG.info("Finished deleting remote path {}.", remotePath); } catch (TException e) { needReturn = false; return new Status(Status.ErrCode.COMMON_ERROR, @@ -596,7 +596,7 @@ public Status listFiles(String remotePath, boolean recursive, List r tFile.getBlockSize(), tFile.getModificationTime(), null /* blockLocations is null*/); result.add(file); } - LOG.info("finished to listLocatedFiles, remote path {}. 
get files: {}", remotePath, result); + LOG.info("Finished listing located files, remote path {}. get files: {}", remotePath, result); return Status.OK; } catch (TException e) { needReturn = false; @@ -632,7 +632,7 @@ public boolean isSplittable(String remotePath, String inputFormat) throws UserEx + operationStatus.getMessage() + ", broker: " + BrokerUtil.printBroker(name, address)); } boolean result = response.isSplittable(); - LOG.info("finished to get path isSplittable, remote path {} with format {}, isSplittable: {}", + LOG.info("Finished getting path isSplittable, remote path {} with format {}, isSplittable: {}", remotePath, inputFormat, result); return result; } catch (TException e) { @@ -678,7 +678,7 @@ public Status globList(String remotePath, List result, boolean fileN RemoteFile file = new RemoteFile(tFile.path, !tFile.isDir, tFile.size, 0, tFile.getModificationTime()); result.add(file); } - LOG.info("finished to list remote path {}. get files: {}", remotePath, result); + LOG.info("Finished listing remote path {}. get files: {}", remotePath, result); } catch (TException e) { needReturn = false; return new Status(Status.ErrCode.COMMON_ERROR, diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/ObjFileSystem.java b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/ObjFileSystem.java index f04fb6fa01b418..dd8fcfc2f7c64e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/ObjFileSystem.java +++ b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/ObjFileSystem.java @@ -91,7 +91,7 @@ public Status downloadWithFileSize(String remoteFilePath, String localFilePath, } if (localFile.length() == fileSize) { LOG.info( - "finished to get file from {} to {} with size: {}. cost {} ms", + "Finished getting file from {} to {} with size: {}. 
cost {} ms", remoteFilePath, localFile.toPath(), fileSize, diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/dfs/DFSFileSystem.java b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/dfs/DFSFileSystem.java index 2ee3c15693130a..4d2ffce629eb8e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/dfs/DFSFileSystem.java +++ b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/dfs/DFSFileSystem.java @@ -200,7 +200,7 @@ public Status downloadWithFileSize(String remoteFilePath, String localFilePath, return st; } FSDataInputStream fsDataInputStream = hdfsOpParams.fsDataInputStream(); - LOG.info("finished to open reader. download {} to {}.", remoteFilePath, localFilePath); + LOG.info("Finished opening reader. download {} to {}.", remoteFilePath, localFilePath); // delete local file if exist File localFile = new File(localFilePath); @@ -266,7 +266,7 @@ public Status downloadWithFileSize(String remoteFilePath, String localFilePath, } } - LOG.info("finished to download from {} to {} with size: {}. cost {} ms", remoteFilePath, localFilePath, + LOG.info("Finished downloading from {} to {} with size: {}. cost {} ms", remoteFilePath, localFilePath, fileSize, (System.currentTimeMillis() - start)); return status; } @@ -370,7 +370,7 @@ public Status directUpload(String content, String remoteFile) { return wst; } FSDataOutputStream fsDataOutputStream = hdfsOpParams.fsDataOutputStream(); - LOG.info("finished to open writer. directly upload to remote path {}.", remoteFile); + LOG.info("Finished opening writer. directly upload to remote path {}.", remoteFile); Status status = Status.OK; try { @@ -402,7 +402,7 @@ public Status upload(String localPath, String remotePath) { return wst; } FSDataOutputStream fsDataOutputStream = hdfsOpParams.fsDataOutputStream(); - LOG.info("finished to open writer. directly upload to remote path {}.", remotePath); + LOG.info("Finished opening writer. 
directly upload to remote path {}.", remotePath); // read local file and write remote File localFile = new File(localPath); long fileLength = localFile.length(); @@ -447,7 +447,7 @@ public Status upload(String localPath, String remotePath) { } if (status.ok()) { - LOG.info("finished to upload {} to remote path {}. cost: {} ms", localPath, remotePath, + LOG.info("Finished uploading {} to remote path {}. cost: {} ms", localPath, remotePath, (System.currentTimeMillis() - start)); } return status; @@ -477,7 +477,7 @@ public Status rename(String srcPath, String destPath) { return new Status(Status.ErrCode.COMMON_ERROR, "failed to rename remote " + srcPath + " to " + destPath + ", msg: " + e.getMessage()); } - LOG.info("finished to rename {} to {}. cost: {} ms", srcPath, destPath, (System.currentTimeMillis() - start)); + LOG.info("Finished renaming {} to {}. cost: {} ms", srcPath, destPath, (System.currentTimeMillis() - start)); return Status.OK; } @@ -495,7 +495,7 @@ public Status delete(String remotePath) { return new Status(Status.ErrCode.COMMON_ERROR, "failed to delete remote path: " + remotePath + ", msg: " + e.getMessage()); } - LOG.info("finished to delete remote path {}.", remotePath); + LOG.info("Finished deleting remote path {}.", remotePath); return Status.OK; } @@ -532,7 +532,7 @@ public Status globList(String remotePath, List result, boolean fileN LOG.warn("errors while get file status ", e); return new Status(Status.ErrCode.COMMON_ERROR, "errors while get file status " + e.getMessage()); } - LOG.info("finish list path {}", remotePath); + LOG.info("Finished listing path {}", remotePath); return Status.OK; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/job/executor/DefaultTaskExecutorHandler.java b/fe/fe-core/src/main/java/org/apache/doris/job/executor/DefaultTaskExecutorHandler.java index cdfe7c0fe08f63..5e330ff13c1687 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/job/executor/DefaultTaskExecutorHandler.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/job/executor/DefaultTaskExecutorHandler.java @@ -46,7 +46,7 @@ public void onEvent(ExecuteTaskEvent executeTaskEvent) { log.info("task is canceled, ignore. task id is {}", task.getTaskId()); return; } - log.info("start to execute task, task id is {}", task.getTaskId()); + log.info("Start executing task, task id is {}", task.getTaskId()); task.runTask(); } catch (Exception e) { log.error("execute task error, task id is {}", executeTaskEvent.getTask().getTaskId(), e); diff --git a/fe/fe-core/src/main/java/org/apache/doris/job/extensions/insert/streaming/StreamingInsertTask.java b/fe/fe-core/src/main/java/org/apache/doris/job/extensions/insert/streaming/StreamingInsertTask.java index 635c639256c276..a02bda517e2a21 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/job/extensions/insert/streaming/StreamingInsertTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/job/extensions/insert/streaming/StreamingInsertTask.java @@ -109,7 +109,7 @@ public void run() throws JobException { log.info("task has been canceled, task id is {}", getTaskId()); return; } - log.info("start to run streaming insert task, label {}, offset is {}", labelName, runningOffset.toString()); + log.info("Start running streaming insert task, label {}, offset is {}", labelName, runningOffset.toString()); String errMsg = null; try { taskCommand.run(ctx, stmtExecutor); diff --git a/fe/fe-core/src/main/java/org/apache/doris/job/extensions/insert/streaming/StreamingMultiTblTask.java b/fe/fe-core/src/main/java/org/apache/doris/job/extensions/insert/streaming/StreamingMultiTblTask.java index 32526f9c513d6c..5744e5cab3f349 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/job/extensions/insert/streaming/StreamingMultiTblTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/job/extensions/insert/streaming/StreamingMultiTblTask.java @@ -124,7 +124,7 @@ public void run() throws JobException { private void sendWriteRequest() throws JobException { Backend 
backend = StreamingJobUtils.selectBackend(); - log.info("start to run streaming multi task {} in backend {}/{}, offset is {}", + log.info("Start running streaming multi task {} in backend {}/{}, offset is {}", taskId, backend.getId(), backend.getHost(), runningOffset.toString()); this.runningBackendId = backend.getId(); WriteRecordRequest params = buildRequestParams(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java index d2caabdec2797f..869b40ff99bdd9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java +++ b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java @@ -325,11 +325,11 @@ public void removeDatabase(String dbName) { index++; } if (targetDbName != null) { - LOG.info("begin to remove database {} from openedDatabases", targetDbName); + LOG.info("Begin to remove database {} from openedDatabases", targetDbName); openedDatabases.remove(index); } try { - LOG.info("begin to remove database {} from replicatedEnvironment", dbName); + LOG.info("Begin to remove database {} from replicatedEnvironment", dbName); // the first parameter null means auto-commit replicatedEnvironment.removeDatabase(null, dbName); } catch (DatabaseNotFoundException e) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecordMgr.java b/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecordMgr.java index 2f2d7818024c89..94dc6768684b22 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecordMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecordMgr.java @@ -349,7 +349,7 @@ protected void runAfterCatalogReady() { } } } - LOG.info("finished to pull stream load records of all backends. record size: {}, cost: {} ms", + LOG.info("Finished pulling stream load records of all backends. 
record size: {}, cost: {} ms", pullRecordSize, (System.currentTimeMillis() - start)); if (pullRecordSize > 0) { FetchStreamLoadRecord fetchStreamLoadRecord = new FetchStreamLoadRecord(beIdToLastStreamLoad); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadPendingTask.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadPendingTask.java index cdf09b65c37ef9..90bcc926bf9ac7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadPendingTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadPendingTask.java @@ -56,7 +56,7 @@ public BrokerLoadPendingTask(BrokerLoadJob loadTaskCallback, @Override public void executeTask() throws UserException { - LOG.info("begin to execute broker pending task. job: {}", callback.getCallbackId()); + LOG.info("Begin to execute broker pending task. job: {}", callback.getCallbackId()); getAllFileStatus(); ((BrokerLoadJob) callback).beginTxn(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java index 947ed64244d4a7..355e4488c391cf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java @@ -579,7 +579,7 @@ protected void unprotectedExecuteRetry(FailMsg failMsg) { try { LOG.debug(new LogBuilder(LogKey.LOAD_JOB, id) .add("label", label) - .add("msg", "begin to abort txn") + .add("msg", "Begin to abort txn") .build()); Env.getCurrentGlobalTransactionMgr().abortTransaction(dbId, label, failMsg.getMsg()); } catch (UserException e) { @@ -641,7 +641,7 @@ protected void unprotectedExecuteCancel(FailMsg failMsg, boolean abortTxn) { if (LOG.isDebugEnabled()) { LOG.debug(new LogBuilder(LogKey.LOAD_JOB, id) .add("transaction_id", transactionId) - .add("msg", "begin to abort txn") + .add("msg", "Begin to abort txn") .build()); } 
Env.getCurrentGlobalTransactionMgr().abortTransaction(dbId, transactionId, failMsg.getMsg()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadLoadingTask.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadLoadingTask.java index 2e9acff237b67d..6e273e14a5bf50 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadLoadingTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadLoadingTask.java @@ -144,7 +144,7 @@ public TUniqueId getLoadId() { @Override protected void executeTask() throws Exception { - LOG.info("begin to execute loading task. load id: {} job id: {}. db: {}, tbl: {}. left retry: {}", + LOG.info("Begin to execute loading task. load id: {} job id: {}. db: {}, tbl: {}. left retry: {}", DebugUtil.printId(loadId), callback.getCallbackId(), db.getFullName(), table.getName(), retryTime); retryTime--; @@ -202,7 +202,7 @@ private void actualExecute(Coordinator curCoordinator, int waitSecond) throws Ex LOG.debug(new LogBuilder(LogKey.LOAD_JOB, callback.getCallbackId()) .add("task_id", signature) .add("query_id", DebugUtil.printId(curCoordinator.getQueryId())) - .add("msg", "begin to execute plan") + .add("msg", "Begin to execute plan") .build()); } curCoordinator.exec(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadManager.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadManager.java index 3af3529177e163..798253b77da430 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadManager.java @@ -776,7 +776,7 @@ private void cleanLabelInternal(long dbId, String label, boolean isReplay) { LOG.warn("Exception:", e); } - LOG.info("finished to clean {} labels on db {} with label '{}' in load mgr. is replay: {}", + LOG.info("Finished cleaning {} labels on db {} with label '{}' in load mgr. 
is replay: {}", counter, dbId, label, isReplay); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java index 3b1f498bcfcf84..eb50328d4d9ebe 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java @@ -773,7 +773,7 @@ public void processTimeoutTasks() { // Cancelled and stopped job will be removed after Configure.label_keep_max_second seconds public void cleanOldRoutineLoadJobs() { if (LOG.isDebugEnabled()) { - LOG.debug("begin to clean old routine load jobs "); + LOG.debug("Begin to clean old routine load jobs "); } clearRoutineLoadJobIf(RoutineLoadJob::isExpired); } @@ -791,7 +791,7 @@ public void cleanOverLimitRoutineLoadJobs() { writeLock(); try { if (LOG.isDebugEnabled()) { - LOG.debug("begin to clean routine load jobs"); + LOG.debug("Begin to clean routine load jobs"); } Deque finishedJobs = idToRoutineLoadJob .values() diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/Checkpoint.java b/fe/fe-core/src/main/java/org/apache/doris/master/Checkpoint.java index 0d21fa8094c8c4..bc3b878315e1a1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/master/Checkpoint.java +++ b/fe/fe-core/src/main/java/org/apache/doris/master/Checkpoint.java @@ -152,7 +152,7 @@ public synchronized void doCheckpoint() throws CheckpointException { // generate new image file long replayedJournalId = -1; - LOG.info("begin to generate new image: image.{}", checkPointVersion); + LOG.info("Begin to generate new image: image.{}", checkPointVersion); env = Env.getCurrentEnv(); env.setEditLog(editLog); createStaticFieldForCkpt(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java index cf7df808eda14d..40ff54ca7a47bb 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java @@ -691,7 +691,7 @@ public void tabletReport(long backendId, Map backendTablets, } long end = System.currentTimeMillis(); - LOG.info("finished to handle tablet report from backend[{}] cost: {} ms", backendId, (end - start)); + LOG.info("Finished handling tablet report from backend[{}] cost: {} ms", backendId, (end - start)); } private static void debugBlock() { @@ -712,7 +712,7 @@ private static void taskReport(long backendId, Map> running long numRunningTasks) { debugBlock(); if (LOG.isDebugEnabled()) { - LOG.debug("begin to handle task report from backend {}", backendId); + LOG.debug("Begin to handle task report from backend {}", backendId); } long start = System.currentTimeMillis(); @@ -759,7 +759,7 @@ private static void taskReport(long backendId, Map> running AgentTaskExecutor.submit(batchTask); } - LOG.info("finished to handle task report from backend {}-{}, " + LOG.info("Finished handling task report from backend {}-{}, " + "diff task num: {}, publishTaskSize: {}, runningTasks: {}, cost: {} ms.", backendId, be != null ? 
be.getHost() : "", batchTask.getTaskNum(), publishTaskSize, runningTasks.entrySet().stream() @@ -769,7 +769,7 @@ private static void taskReport(long backendId, Map> running } private static void diskReport(long backendId, Map backendDisks) { - LOG.info("begin to handle disk report from backend {}", backendId); + LOG.info("Begin to handle disk report from backend {}", backendId); long start = System.currentTimeMillis(); Backend backend = Env.getCurrentSystemInfo().getBackend(backendId); if (backend == null) { @@ -780,12 +780,12 @@ private static void diskReport(long backendId, Map backendDisks) .map(disk -> "path=" + disk.getRootPath() + ", path hash=" + disk.getPathHash()) .collect(Collectors.toList()); backend.updateDisks(backendDisks); - LOG.info("finished to handle disk report from backend: {}, disk size: {}, bad disk: {}, cost: {} ms", + LOG.info("Finished handling disk report from backend: {}, disk size: {}, bad disk: {}, cost: {} ms", backendId, backendDisks.size(), badDisks, (System.currentTimeMillis() - start)); } private static void cpuReport(long backendId, int cpuCores, int pipelineExecutorSize) { - LOG.info("begin to handle cpu report from backend {}", backendId); + LOG.info("Begin to handle cpu report from backend {}", backendId); long start = System.currentTimeMillis(); Backend backend = Env.getCurrentSystemInfo().getBackend(backendId); if (backend == null) { @@ -799,7 +799,7 @@ private static void cpuReport(long backendId, int cpuCores, int pipelineExecutor // log change Env.getCurrentEnv().getEditLog().logBackendStateChange(backend); } - LOG.info("finished to handle cpu report from backend {}, cost: {} ms", + LOG.info("Finished handling cpu report from backend {}, cost: {} ms", backendId, (System.currentTimeMillis() - start)); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java index 63262aaf0b493b..1dbb6b825036ab 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java @@ -551,7 +551,7 @@ private void createUserInternal(UserIdentity userIdent, String roleName, byte[] roleName, passwordOptions, comment, userId); Env.getCurrentEnv().getEditLog().logCreateUser(privInfo); } - LOG.info("finished to create user: {}, is replay: {}", userIdent, isReplay); + LOG.info("Finished creating user: {}, is replay: {}", userIdent, isReplay); } finally { writeUnlock(); } @@ -601,7 +601,7 @@ private void dropUserInternal(UserIdentity userIdent, boolean ignoreIfNonExists, if (!isReplay) { Env.getCurrentEnv().getEditLog().logNewDropUser(userIdent); } - LOG.info("finished to drop user: {}, is replay: {}", userIdent.getQualifiedUser(), isReplay); + LOG.info("Finished dropping user: {}, is replay: {}", userIdent.getQualifiedUser(), isReplay); } finally { writeUnlock(); } @@ -709,7 +709,7 @@ private void grantInternal(UserIdentity userIdent, String role, TablePattern tbl PrivInfo info = new PrivInfo(userIdent, tblPattern, privs, null, role, colPrivileges); Env.getCurrentEnv().getEditLog().logGrantPriv(info); } - LOG.info("finished to grant privilege. is replay: {}", isReplay); + LOG.info("Finished granting privilege. is replay: {}", isReplay); } finally { writeUnlock(); } @@ -761,7 +761,7 @@ private void grantInternal(UserIdentity userIdent, String role, ResourcePattern PrivInfo info = new PrivInfo(userIdent, resourcePattern, privs, null, role); Env.getCurrentEnv().getEditLog().logGrantPriv(info); } - LOG.info("finished to grant resource privilege. is replay: {}", isReplay); + LOG.info("Finished granting resource privilege. 
is replay: {}", isReplay); } finally { writeUnlock(); } @@ -792,7 +792,7 @@ private void grantInternal(UserIdentity userIdent, String role, WorkloadGroupPat PrivInfo info = new PrivInfo(userIdent, workloadGroupPattern, privs, null, role); Env.getCurrentEnv().getEditLog().logGrantPriv(info); } - LOG.info("finished to grant workload group privilege. is replay: {}", isReplay); + LOG.info("Finished granting workload group privilege. is replay: {}", isReplay); } finally { writeUnlock(); } @@ -816,7 +816,7 @@ private void grantInternal(UserIdentity userIdent, List roles, boolean i PrivInfo info = new PrivInfo(userIdent, roles); Env.getCurrentEnv().getEditLog().logGrantPriv(info); } - LOG.info("finished to grant role privilege. is replay: {}", isReplay); + LOG.info("Finished granting role privilege. is replay: {}", isReplay); } finally { writeUnlock(); } @@ -907,7 +907,7 @@ private void revokeInternal(UserIdentity userIdent, String role, TablePattern tb PrivInfo info = new PrivInfo(userIdent, tblPattern, privs, null, role, colPrivileges); Env.getCurrentEnv().getEditLog().logRevokePriv(info); } - LOG.info("finished to revoke privilege. is replay: {}", isReplay); + LOG.info("Finished revoking privilege. is replay: {}", isReplay); } finally { writeUnlock(); } @@ -928,7 +928,7 @@ private void revokeInternal(UserIdentity userIdent, String role, ResourcePattern PrivInfo info = new PrivInfo(userIdent, resourcePattern, privs, null, role); Env.getCurrentEnv().getEditLog().logRevokePriv(info); } - LOG.info("finished to revoke privilege. is replay: {}", isReplay); + LOG.info("Finished revoking privilege. is replay: {}", isReplay); } finally { writeUnlock(); } @@ -949,7 +949,7 @@ private void revokeInternal(UserIdentity userIdent, String role, WorkloadGroupPa PrivInfo info = new PrivInfo(userIdent, workloadGroupPattern, privs, null, role); Env.getCurrentEnv().getEditLog().logRevokePriv(info); } - LOG.info("finished to revoke privilege. 
is replay: {}", isReplay); + LOG.info("Finished revoking privilege. is replay: {}", isReplay); } finally { writeUnlock(); } @@ -973,7 +973,7 @@ private void revokeInternal(UserIdentity userIdent, List roles, boolean PrivInfo info = new PrivInfo(userIdent, roles); Env.getCurrentEnv().getEditLog().logRevokePriv(info); } - LOG.info("finished to revoke role privilege. is replay: {}", isReplay); + LOG.info("Finished revoking role privilege. is replay: {}", isReplay); } finally { writeUnlock(); } @@ -1017,19 +1017,19 @@ public void setPasswordInternal(UserIdentity userIdent, byte[] password, UserIde } finally { writeUnlock(); } - LOG.info("finished to set password for {}. is replay: {}", userIdent, isReplay); + LOG.info("Finished setting password for {}. is replay: {}", userIdent, isReplay); } public void setLdapPassword(String ldapPassword) { ldapInfo = new LdapInfo(ldapPassword); Env.getCurrentEnv().getEditLog().logSetLdapPassword(ldapInfo); - LOG.info("finished to set ldap password."); + LOG.info("Finished setting LDAP password."); } public void replaySetLdapPassword(LdapInfo info) { ldapInfo = info; if (LOG.isDebugEnabled()) { - LOG.debug("finish replaying ldap admin password."); + LOG.debug("Finished replaying LDAP admin password."); } } @@ -1092,7 +1092,7 @@ private void createRoleInternal(String role, boolean ignoreIfExists, String comm } finally { writeUnlock(); } - LOG.info("finished to create role: {}, is replay: {}", role, isReplay); + LOG.info("Finished creating role: {}, is replay: {}", role, isReplay); } public void dropRole(String role, boolean ignoreIfNonExists) throws DdlException { @@ -1124,7 +1124,7 @@ private void dropRoleInternal(String role, boolean ignoreIfNonExists, boolean is } finally { writeUnlock(); } - LOG.info("finished to drop role: {}, is replay: {}", role, isReplay); + LOG.info("Finished dropping role: {}, is replay: {}", role, isReplay); } public Set getRoleUsers(String roleName) { @@ -1153,7 +1153,7 @@ public void 
updateUserPropertyInternal(String user, List> p UserPropertyInfo propertyInfo = new UserPropertyInfo(user, properties); Env.getCurrentEnv().getEditLog().logUpdateUserProperty(propertyInfo); } - LOG.info("finished to set properties for user: {}", user); + LOG.info("Finished setting properties for user: {}", user); } catch (DdlException e) { if (isReplay && e.getMessage().contains("Unknown user property")) { LOG.warn("ReplayUpdateUserProperty failed, maybe FE rolled back version, " + e.getMessage()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaReader.java b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaReader.java index 8024105fe26211..3e6c03c238339c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaReader.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaReader.java @@ -69,7 +69,7 @@ public class MetaReader { private static final Logger LOG = LogManager.getLogger(MetaReader.class); public static void read(File imageFile, Env env) throws IOException, DdlException { - LOG.info("start load image from {}. is ckpt: {}", imageFile.getAbsolutePath(), Env.isCheckpointThread()); + LOG.info("Start loading image from {}. is ckpt: {}", imageFile.getAbsolutePath(), Env.isCheckpointThread()); long loadImageStartTime = System.currentTimeMillis(); MetaHeader metaHeader = MetaHeader.read(imageFile); MetaFooter metaFooter = MetaFooter.read(imageFile); @@ -133,6 +133,6 @@ public static void read(File imageFile, Env env) throws IOException, DdlExceptio Preconditions.checkState(remoteChecksum == checksum, remoteChecksum + " vs. 
" + checksum); long loadImageEndTime = System.currentTimeMillis(); - LOG.info("finished to load image in " + (loadImageEndTime - loadImageStartTime) + " ms"); + LOG.info("Finished loading image in " + (loadImageEndTime - loadImageStartTime) + " ms"); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaWriter.java b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaWriter.java index a58e7afacc4f33..37a6220e91f98a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaWriter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaWriter.java @@ -95,7 +95,7 @@ public long doWork(String name, WriteMethod method) throws IOException { public static void write(File imageFile, Env env) throws IOException { // save image does not need any lock. because only checkpoint thread will call this method. - LOG.info("start to save image to {}. is ckpt: {}", + LOG.info("Start saving image to {}. is ckpt: {}", imageFile.getAbsolutePath(), Env.isCheckpointThread()); final Reference checksum = new Reference<>(0L); long saveImageStartTime = System.currentTimeMillis(); @@ -130,7 +130,7 @@ public static void write(File imageFile, Env env) throws IOException { MetaFooter.write(imageFile, metaIndices, checksum.getRef()); long saveImageEndTime = System.currentTimeMillis(); - LOG.info("finished save image {} in {} ms. checksum is {}, size is {}", imageFile.getAbsolutePath(), + LOG.info("Finished saving image {} in {} ms. 
checksum is {}, size is {}", imageFile.getAbsolutePath(), (saveImageEndTime - saveImageStartTime), checksum.getRef(), imageFile.length()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java index 8dd8ebe83602af..f50e55b1bd865c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java @@ -556,7 +556,7 @@ public void execute() throws Exception { UUID uuid = UUID.randomUUID(); TUniqueId queryId = new TUniqueId(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits()); if (Config.enable_print_request_before_execution) { - LOG.info("begin to execute query {} {}", + LOG.info("Begin to execute query {} {}", DebugUtil.printId(queryId), originStmt == null ? "null" : originStmt.originStmt); } queryRetry(queryId); diff --git a/fe/fe-core/src/main/java/org/apache/doris/rpc/BackendServiceProxy.java b/fe/fe-core/src/main/java/org/apache/doris/rpc/BackendServiceProxy.java index fbdfb3cf223a70..3d10c3483bd1e3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/rpc/BackendServiceProxy.java +++ b/fe/fe-core/src/main/java/org/apache/doris/rpc/BackendServiceProxy.java @@ -103,7 +103,7 @@ public BackendServiceClientExtIp(String realIp, BackendServiceClient client) { } public void removeProxy(TNetworkAddress address) { - LOG.warn("begin to remove proxy: {}", address); + LOG.warn("Begin to remove proxy: {}", address); BackendServiceClientExtIp serviceClientExtIp; lock.lock(); try { diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java b/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java index 0a5abcf9516d1b..2f0e5a8e473ba0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java @@ -225,7 +225,7 @@ private void addBackend(String host, int 
heartbeatPort, Map tagM // log Env.getCurrentEnv().getEditLog().logAddBackend(newBackend); - LOG.info("finished to add {} ", newBackend); + LOG.info("Finished adding {} ", newBackend); // backends is changed, regenerated tablet number metrics MetricRepo.generateBackendsTabletMetrics(); @@ -284,7 +284,7 @@ public void dropBackend(String host, int heartbeatPort) throws DdlException { // log Env.getCurrentEnv().getEditLog().logDropBackend(droppedBackend); - LOG.info("finished to drop {}", droppedBackend); + LOG.info("Finished dropping {}", droppedBackend); // backends is changed, regenerated tablet number metrics MetricRepo.generateBackendsTabletMetrics(); @@ -1016,7 +1016,7 @@ public void modifyBackends(ModifyBackendOp op) throws UserException { if (shouldModify) { Env.getCurrentEnv().getEditLog().logModifyBackend(be); - LOG.info("finished to modify backend {} ", be); + LOG.info("Finished modifying backend {} ", be); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/AgentBoundedBatchTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/AgentBoundedBatchTask.java index f6cd5fd009ac60..cf698a8e59d5e7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/AgentBoundedBatchTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/AgentBoundedBatchTask.java @@ -87,7 +87,7 @@ public void addTask(AgentTask agentTask) { @Override public void run() { int taskNum = getTaskNum(); - LOG.info("begin to submit tasks to BE. total {} tasks, be task concurrency: {}", taskNum, taskConcurrency); + LOG.info("Begin to submit tasks to BE. 
total {} tasks, be task concurrency: {}", taskNum, taskConcurrency); boolean submitFinished = false; while (getSubmitTaskNum() < taskNum && !submitFinished) { for (Long backendId : backendIdToTasks.keySet()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java index b0a17790170791..008198f2383a52 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java @@ -1902,7 +1902,7 @@ public void abortTransaction(long transactionId, String reason, TxnCommitAttachm } public void abortTransaction2PC(long transactionId) throws UserException { - LOG.info("begin to abort txn {}", transactionId); + LOG.info("Begin to abort txn {}", transactionId); if (transactionId < 0) { LOG.info("transaction id is {}, less than 0, maybe this is an old type load job," + " ignore abort operation", transactionId);