BACKPORT: timekeeping: Use proper clock specifier names in functions

This renames the remaining "boot" clock accessors uniformly to "boottime" and
the "tai" accessors uniformly to "clocktai", addressing the remaining oversights.

Change-Id: I3463b9045bddeba00d6f9fcf78d63008459c1b9a
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Link: https://lkml.kernel.org/r/20190621203249.3909-2-Jason@zx2c4.com
This commit is contained in:
Jason A. Donenfeld
2019-06-21 22:32:48 +02:00
committed by bengris32
parent e15abee38e
commit 2b4e979a7a
33 changed files with 76 additions and 76 deletions

View File

@@ -65,7 +65,7 @@ different format depending on what is required by the user:
.. c:function:: u64 ktime_get_ns( void )
u64 ktime_get_boottime_ns( void )
u64 ktime_get_real_ns( void )
u64 ktime_get_tai_ns( void )
u64 ktime_get_clocktai_ns( void )
u64 ktime_get_raw_ns( void )
Same as the plain ktime_get functions, but returning a u64 number

View File

@@ -264,10 +264,10 @@ static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
ctr_val = rdtsc();
break;
case VMWARE_BACKDOOR_PMC_REAL_TIME:
ctr_val = ktime_get_boot_ns();
ctr_val = ktime_get_boottime_ns();
break;
case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
ctr_val = ktime_get_boot_ns() +
ctr_val = ktime_get_boottime_ns() +
vcpu->kvm->arch.kvmclock_offset;
break;
default:

View File

@@ -1722,7 +1722,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
offset = kvm_compute_tsc_offset(vcpu, data);
ns = ktime_get_boot_ns();
ns = ktime_get_boottime_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;
if (vcpu->arch.virtual_tsc_khz) {
@@ -2064,7 +2064,7 @@ u64 get_kvmclock_ns(struct kvm *kvm)
spin_lock(&ka->pvclock_gtod_sync_lock);
if (!ka->use_master_clock) {
spin_unlock(&ka->pvclock_gtod_sync_lock);
return ktime_get_boot_ns() + ka->kvmclock_offset;
return ktime_get_boottime_ns() + ka->kvmclock_offset;
}
hv_clock.tsc_timestamp = ka->master_cycle_now;
@@ -2080,7 +2080,7 @@ u64 get_kvmclock_ns(struct kvm *kvm)
&hv_clock.tsc_to_system_mul);
ret = __pvclock_read_cycles(&hv_clock, rdtsc());
} else
ret = ktime_get_boot_ns() + ka->kvmclock_offset;
ret = ktime_get_boottime_ns() + ka->kvmclock_offset;
put_cpu();
@@ -2179,7 +2179,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
}
if (!use_master_clock) {
host_tsc = rdtsc();
kernel_ns = ktime_get_boot_ns();
kernel_ns = ktime_get_boottime_ns();
}
tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
@@ -8836,7 +8836,7 @@ int kvm_arch_hardware_enable(void)
* before any KVM threads can be running. Unfortunately, we can't
* bring the TSCs fully up to date with real time, as we aren't yet far
* enough into CPU bringup that we know how much real time has actually
* elapsed; our helper function, ktime_get_boot_ns() will be using boot
* elapsed; our helper function, ktime_get_boottime_ns() will be using boot
* variables that haven't been updated yet.
*
* So we simply find the maximum observed TSC above, then record the
@@ -9067,7 +9067,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
mutex_init(&kvm->arch.apic_map_lock);
spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
kvm->arch.kvmclock_offset = -ktime_get_boottime_ns();
pvclock_update_vm_gtod_copy(kvm);
kvm->arch.guest_can_read_msr_platform_info = true;

View File

@@ -824,7 +824,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
/* No access to rdtsc. Using raw monotonic time */
args->cpu_clock_counter = ktime_get_raw_ns();
args->system_clock_counter = ktime_get_boot_ns();
args->system_clock_counter = ktime_get_boottime_ns();
/* Since the counter is in nano-seconds we use 1GHz frequency */
args->system_clock_freq = 1000000000;

View File

@@ -158,7 +158,7 @@ static int dht11_decode(struct dht11 *dht11, int offset)
return -EIO;
}
dht11->timestamp = ktime_get_boot_ns();
dht11->timestamp = ktime_get_boottime_ns();
if (hum_int < 4) { /* DHT22: 100000 = (3*256+232)*100 */
dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
((temp_int & 0x80) ? -100 : 100);
@@ -186,7 +186,7 @@ static irqreturn_t dht11_handle_irq(int irq, void *data)
/* TODO: Consider making the handler safe for IRQ sharing */
if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
dht11->edges[dht11->num_edges].ts = ktime_get_boot_ns();
dht11->edges[dht11->num_edges].ts = ktime_get_boottime_ns();
dht11->edges[dht11->num_edges++].value =
gpio_get_value(dht11->gpio);
@@ -205,7 +205,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
int ret, timeres, offset;
mutex_lock(&dht11->lock);
if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boottime_ns()) {
timeres = ktime_get_resolution_ns();
dev_dbg(dht11->dev, "current timeresolution: %dns\n", timeres);
if (timeres > DHT11_MIN_TIMERES) {
@@ -331,7 +331,7 @@ static int dht11_probe(struct platform_device *pdev)
return -EINVAL;
}
dht11->timestamp = ktime_get_boot_ns() - DHT11_DATA_VALID_TIME - 1;
dht11->timestamp = ktime_get_boottime_ns() - DHT11_DATA_VALID_TIME - 1;
dht11->num_edges = -1;
platform_set_drvdata(pdev, iio);

View File

@@ -225,9 +225,9 @@ s64 iio_get_time_ns(const struct iio_dev *indio_dev)
ktime_get_coarse_ts64(&tp);
return timespec64_to_ns(&tp);
case CLOCK_BOOTTIME:
return ktime_get_boot_ns();
return ktime_get_boottime_ns();
case CLOCK_TAI:
return ktime_get_tai_ns();
return ktime_get_clocktai_ns();
default:
BUG();
}

View File

@@ -310,7 +310,7 @@ static void aliasguid_query_handler(int status,
if (status) {
pr_debug("(port: %d) failed: status = %d\n",
cb_ctx->port, status);
rec->time_to_run = ktime_get_boot_ns() + 1 * NSEC_PER_SEC;
rec->time_to_run = ktime_get_boottime_ns() + 1 * NSEC_PER_SEC;
goto out;
}
@@ -416,7 +416,7 @@ next_entry:
be64_to_cpu((__force __be64)rec->guid_indexes),
be64_to_cpu((__force __be64)applied_guid_indexes),
be64_to_cpu((__force __be64)declined_guid_indexes));
rec->time_to_run = ktime_get_boot_ns() +
rec->time_to_run = ktime_get_boottime_ns() +
resched_delay_sec * NSEC_PER_SEC;
} else {
rec->status = MLX4_GUID_INFO_STATUS_SET;
@@ -709,7 +709,7 @@ static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
}
}
if (resched_delay_sec) {
u64 curr_time = ktime_get_boot_ns();
u64 curr_time = ktime_get_boottime_ns();
*resched_delay_sec = (low_record_time < curr_time) ? 0 :
div_u64((low_record_time - curr_time), NSEC_PER_SEC);

View File

@@ -73,7 +73,7 @@ static void led_activity_function(struct timer_list *t)
* down to 16us, ensuring we won't overflow 32-bit computations below
* even up to 3k CPUs, while keeping divides cheap on smaller systems.
*/
curr_boot = ktime_get_boot_ns() * cpus;
curr_boot = ktime_get_boottime_ns() * cpus;
diff_boot = (curr_boot - activity_data->last_boot) >> 16;
diff_used = (curr_used - activity_data->last_used) >> 16;
activity_data->last_boot = curr_boot;

View File

@@ -281,18 +281,18 @@ int scp_sys_full_reset(void)
pr_notice("[SCP] %s\n", __func__);
#if SCP_RESERVED_MEM && IS_ENABLED(CONFIG_OF_RESERVED_MEM)
restore_start = ktime_get_boot_ns();
restore_start = ktime_get_boottime_ns();
if (scpreg.secure_dump) {
#if SCP_SECURE_DUMP_MEASURE
memset(scpdump_cal, 0x0, sizeof(scpdump_cal));
scpdump_cal[0].start = ktime_get_boot_ns();
scpdump_cal[1].start = ktime_get_boot_ns();
scpdump_cal[0].start = ktime_get_boottime_ns();
scpdump_cal[1].start = ktime_get_boottime_ns();
#endif
scp_restore_l2tcm();
#if SCP_SECURE_DUMP_MEASURE
scpdump_cal[1].end = ktime_get_boot_ns();
scpdump_cal[1].end = ktime_get_boottime_ns();
#endif
} else {
#else
@@ -311,21 +311,21 @@ int scp_sys_full_reset(void)
#if SCP_RESERVED_MEM && IS_ENABLED(CONFIG_OF_RESERVED_MEM)
if (scpreg.secure_dump) {
#if SCP_SECURE_DUMP_MEASURE
scpdump_cal[2].start = ktime_get_boot_ns();
scpdump_cal[2].start = ktime_get_boottime_ns();
#endif
tmp = (void *)(scp_ap_dram_virt +
ROUNDUP(scp_region_info_copy.ap_dram_size, 1024)
* scpreg.core_nums);
scp_restore_dram();
#if SCP_SECURE_DUMP_MEASURE
scpdump_cal[2].end = ktime_get_boot_ns();
scpdump_cal[0].end = ktime_get_boot_ns();
scpdump_cal[2].end = ktime_get_boottime_ns();
scpdump_cal[0].end = ktime_get_boottime_ns();
for (idx = 0; idx < 3; idx++) {
pr_notice("MDebug SCP Cal:%d %lldns\n", idx,
(scpdump_cal[idx].end - scpdump_cal[idx].start));
}
#endif
restore_end = ktime_get_boot_ns();
restore_end = ktime_get_boottime_ns();
pr_notice("[SCP] Restore: %lld ns\n", (restore_end - restore_start));
} else {
#else

View File

@@ -441,23 +441,23 @@ static unsigned int scp_crash_dump(enum scp_core_id id)
memset(scpdump_cal, 0x0, sizeof(scpdump_cal));
idx = 0;
scpdump_cal[0].type = 1;
scpdump_cal[0].start = ktime_get_boot_ns();
scpdump_cal[0].start = ktime_get_boottime_ns();
#endif
dump_start = ktime_get_boot_ns();
dump_start = ktime_get_boottime_ns();
{
int polling = 1;
int retry = POLLING_RETRY;
#if SCP_SECURE_DUMP_MEASURE
idx++;
scpdump_cal[idx].type = 2;
scpdump_cal[idx].start = ktime_get_boot_ns();
scpdump_cal[idx].start = ktime_get_boottime_ns();
#endif
scp_do_dump();
#if SCP_SECURE_DUMP_MEASURE
scpdump_cal[idx].end = ktime_get_boot_ns();
scpdump_cal[idx].end = ktime_get_boottime_ns();
#endif
while (polling != 0 && retry > 0) {
@@ -466,13 +466,13 @@ static unsigned int scp_crash_dump(enum scp_core_id id)
break;
idx++;
scpdump_cal[idx].type = 3;
scpdump_cal[idx].start = ktime_get_boot_ns();
scpdump_cal[idx].start = ktime_get_boottime_ns();
#endif
polling = scp_do_polling();
#if SCP_SECURE_DUMP_MEASURE
scpdump_cal[idx].end = ktime_get_boot_ns();
scpdump_cal[idx].end = ktime_get_boottime_ns();
#endif
if (!polling)
@@ -488,11 +488,11 @@ static unsigned int scp_crash_dump(enum scp_core_id id)
#endif
}
}
dump_end = ktime_get_boot_ns();
dump_end = ktime_get_boottime_ns();
pr_notice("[SCP] Dump: %lld ns\n", (dump_end - dump_start));
#if SCP_SECURE_DUMP_MEASURE
scpdump_cal[0].end = ktime_get_boot_ns();
scpdump_cal[0].end = ktime_get_boottime_ns();
for (idx = 0; idx < POLLING_RETRY; idx++) {
if (scpdump_cal[idx].type == 0)
break;

View File

@@ -114,7 +114,7 @@ static int hf_manager_report_event(struct hf_client *client,
spin_lock_irqsave(&hf_fifo->buffer_lock, flags);
if (unlikely(hf_fifo->buffull == true)) {
hang_time = ktime_get_boot_ns() - hf_fifo->hang_begin;
hang_time = ktime_get_boottime_ns() - hf_fifo->hang_begin;
if (hang_time >= max_hang_time) {
/* reset buffer */
hf_fifo->buffull = false;
@@ -161,9 +161,9 @@ static int hf_manager_report_event(struct hf_client *client,
hang_time = hf_fifo->hang_begin -
hf_fifo->client_active;
if (hang_time < max_hang_time)
hf_fifo->hang_begin = ktime_get_boot_ns();
hf_fifo->hang_begin = ktime_get_boottime_ns();
} else {
hf_fifo->hang_begin = ktime_get_boot_ns();
hf_fifo->hang_begin = ktime_get_boottime_ns();
}
}
spin_unlock_irqrestore(&hf_fifo->buffer_lock, flags);
@@ -245,7 +245,7 @@ static enum hrtimer_restart hf_manager_io_poll(struct hrtimer *timer)
(struct hf_manager *)container_of(timer,
struct hf_manager, io_poll_timer);
hf_manager_sched_sample(manager, ktime_get_boot_ns());
hf_manager_sched_sample(manager, ktime_get_boottime_ns());
hrtimer_forward_now(&manager->io_poll_timer,
ns_to_ktime(atomic64_read(&manager->io_poll_interval)));
return HRTIMER_RESTART;
@@ -569,7 +569,7 @@ static inline void hf_manager_save_update_enable(struct hf_client *client,
old->start_time = request->start_time;
/* update new */
if (!request->enable)
request->start_time = ktime_get_boot_ns();
request->start_time = ktime_get_boottime_ns();
request->enable = true;
request->down_sample = cmd->down_sample;
request->delay = batch->delay;
@@ -1236,7 +1236,7 @@ static int fetch_next(struct hf_client_fifo *hf_fifo,
*event = hf_fifo->buffer[hf_fifo->tail++];
hf_fifo->tail &= hf_fifo->bufsize - 1;
hf_fifo->buffull = false;
hf_fifo->client_active = ktime_get_boot_ns();
hf_fifo->client_active = ktime_get_boottime_ns();
}
spin_unlock_irqrestore(&hf_fifo->buffer_lock, flags);
return have_event;

View File

@@ -95,7 +95,7 @@ static void lsm6dsm_sample_complete(void *ctx)
(driver_dev->async_rx_buffer[4]));
coordinate_map(driver_dev->direction, data);
memset(&event, 0, sizeof(struct hf_manager_event));
event.timestamp = ktime_get_boot_ns();
event.timestamp = ktime_get_boottime_ns();
event.sensor_type = SENSOR_TYPE_GYRO_SECONDARY;
event.accurancy = SENSOR_ACCURANCY_HIGH;
event.action = DATA_ACTION;

View File

@@ -344,7 +344,7 @@ static void mtk_nanohub_moving_average(union SCP_SENSOR_HUB_DATA *rsp)
* return;
*}
*/
ap_now_time = ktime_get_boot_ns();
ap_now_time = ktime_get_boottime_ns();
arch_counter = arch_counter_get_cntvct();
scp_raw_time = rsp->notify_rsp.scp_timestamp;
ipi_transfer_time = arch_counter_to_ns(arch_counter -
@@ -892,7 +892,7 @@ static int mtk_nanohub_send_timestamp_wake_locked(void)
/* send_timestamp_to_hub is process context, disable irq is safe */
local_irq_disable();
now_time = ktime_get_boot_ns();
now_time = ktime_get_boottime_ns();
arch_counter = arch_counter_get_cntvct();
local_irq_enable();
req.set_config_req.sensorType = 0;
@@ -2483,12 +2483,12 @@ static int mtk_nanohub_pm_event(struct notifier_block *notifier,
{
switch (pm_event) {
case PM_POST_SUSPEND:
pr_debug("resume ap boottime=%lld\n", ktime_get_boot_ns());
pr_debug("resume ap boottime=%lld\n", ktime_get_boottime_ns());
WRITE_ONCE(rtc_compensation_suspend, false);
mtk_nanohub_send_timestamp_to_hub();
return NOTIFY_DONE;
case PM_SUSPEND_PREPARE:
pr_debug("suspend ap boottime=%lld\n", ktime_get_boot_ns());
pr_debug("suspend ap boottime=%lld\n", ktime_get_boottime_ns());
WRITE_ONCE(rtc_compensation_suspend, true);
return NOTIFY_DONE;
default:

View File

@@ -312,7 +312,7 @@ int nanohub_comms_rx_retrans_boottime(struct nanohub_data *data,
do {
data->comms.open(data);
boottime = ktime_get_boot_ns();
boottime = ktime_get_boottime_ns();
packet_size =
packet_create(&pad->packet, seq, cmd, sizeof(boottime),
(u8 *)&boottime, false);

View File

@@ -40,7 +40,7 @@ static void test_work_func(struct work_struct *work)
ctrl->length = sizeof(*timesync);
timesync = (struct sensor_comm_timesync *)ctrl->data;
local_irq_disable();
timesync->host_timestamp = ktime_get_boot_ns();
timesync->host_timestamp = ktime_get_boottime_ns();
timesync->host_archcounter = arch_counter_get_cntvct();
local_irq_enable();
ret = sensor_comm_ctrl_send(ctrl, sizeof(*ctrl) + ctrl->length);

View File

@@ -742,7 +742,7 @@ int acc_data_report(struct acc_data *data)
*/
if (event.reserved == 1)
mark_timestamp(ID_ACCELEROMETER, DATA_REPORT,
ktime_get_boot_ns(), event.time_stamp);
ktime_get_boottime_ns(), event.time_stamp);
err = sensor_input_event(acc_context_obj->mdev.minor, &event);
return err;
}

View File

@@ -771,7 +771,7 @@ int gyro_data_report(struct gyro_data *data)
event.reserved = data->reserved[0];
if (event.reserved == 1)
mark_timestamp(ID_GYROSCOPE, DATA_REPORT, ktime_get_boot_ns(),
mark_timestamp(ID_GYROSCOPE, DATA_REPORT, ktime_get_boottime_ns(),
event.time_stamp);
err = sensor_input_event(gyro_context_obj->mdev.minor, &event);
return err;

View File

@@ -676,7 +676,7 @@ int mag_data_report(struct mag_data *data)
event.reserved = data->reserved[0];
if (event.reserved == 1)
mark_timestamp(ID_MAGNETIC, DATA_REPORT, ktime_get_boot_ns(),
mark_timestamp(ID_MAGNETIC, DATA_REPORT, ktime_get_boottime_ns(),
event.time_stamp);
err = sensor_input_event(mag_context_obj->mdev.minor, &event);
return err;

View File

@@ -535,7 +535,7 @@ static int SCP_sensorHub_direct_push_work(void *data)
if (ret)
continue;
WRITE_ONCE(chre_kthread_wait_condition, false);
mark_timestamp(0, WORK_START, ktime_get_boot_ns(), 0);
mark_timestamp(0, WORK_START, ktime_get_boottime_ns(), 0);
SCP_sensorHub_read_wp_queue();
}
return 0;
@@ -623,7 +623,7 @@ static void SCP_sensorHub_moving_average(union SCP_SENSOR_HUB_DATA *rsp)
* return;
* }
*/
ap_now_time = ktime_get_boot_ns();
ap_now_time = ktime_get_boottime_ns();
arch_counter = arch_counter_get_cntvct();
scp_raw_time = rsp->notify_rsp.scp_timestamp;
ipi_transfer_time = arch_counter_to_ns(arch_counter -
@@ -645,7 +645,7 @@ static void SCP_sensorHub_notify_cmd(union SCP_SENSOR_HUB_DATA *rsp,
switch (rsp->notify_rsp.event) {
case SCP_DIRECT_PUSH:
case SCP_FIFO_FULL:
mark_timestamp(0, GOT_IPI, ktime_get_boot_ns(), 0);
mark_timestamp(0, GOT_IPI, ktime_get_boottime_ns(), 0);
mark_ipi_timestamp(arch_counter_get_cntvct() -
rsp->notify_rsp.arch_counter);
#ifdef DEBUG_PERFORMANCE_HW_TICK
@@ -1258,7 +1258,7 @@ static int sensor_send_timestamp_wake_locked(void)
/* send_timestamp_to_hub is process context, disable irq is safe */
local_irq_disable();
now_time = ktime_get_boot_ns();
now_time = ktime_get_boottime_ns();
arch_counter = arch_counter_get_cntvct();
local_irq_enable();
req.set_config_req.sensorType = 0;
@@ -1341,7 +1341,7 @@ int sensor_enable_to_hub(uint8_t handle, int enabledisable)
mSensorState[sensor_type].enable = enabledisable;
if (enabledisable)
atomic64_set(&mSensorState[sensor_type].enableTime,
ktime_get_boot_ns());
ktime_get_boottime_ns());
init_sensor_config_cmd(&cmd, sensor_type);
if (atomic_read(&power_status) == SENSOR_POWER_UP) {
ret = nanohub_external_write((const uint8_t *)&cmd,
@@ -2529,12 +2529,12 @@ static int sensorHub_pm_event(struct notifier_block *notifier,
{
switch (pm_event) {
case PM_POST_SUSPEND:
pr_debug("resume ap boottime=%lld\n", ktime_get_boot_ns());
pr_debug("resume ap boottime=%lld\n", ktime_get_boottime_ns());
WRITE_ONCE(rtc_compensation_suspend, false);
sensor_send_timestamp_to_hub();
return NOTIFY_DONE;
case PM_SUSPEND_PREPARE:
pr_debug("suspend ap boottime=%lld\n", ktime_get_boot_ns());
pr_debug("suspend ap boottime=%lld\n", ktime_get_boottime_ns());
WRITE_ONCE(rtc_compensation_suspend, true);
return NOTIFY_DONE;
default:

View File

@@ -127,7 +127,7 @@ uint64_t archcounter_timesync_to_boot(uint64_t hwclock)
spin_lock(&moving_average_lock);
local_irq_save(flags);
base_time = ktime_get_boot_ns();
base_time = ktime_get_boottime_ns();
archcounter_time = arch_counter_to_ns(arch_counter_get_cntvct());
local_irq_restore(flags);
@@ -171,7 +171,7 @@ static void timesync_test_work_func(struct work_struct *work)
local_irq_save(flags);
base_time = ktime_get_boot_ns();
base_time = ktime_get_boottime_ns();
archcounter_time = arch_counter_to_ns(arch_counter_get_cntvct());
local_irq_restore(flags);

View File

@@ -565,7 +565,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control)))
rx_status->boottime_ns = ktime_get_boot_ns();
rx_status->boottime_ns = ktime_get_boottime_ns();
/* Take a reference briefly to kick off a d0i3 entry delay so
* we can handle bursts of RX packets without toggling the

View File

@@ -1465,7 +1465,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control)))
rx_status->boottime_ns = ktime_get_boot_ns();
rx_status->boottime_ns = ktime_get_boottime_ns();
}
if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {

View File

@@ -1869,7 +1869,7 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
}
*gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
*boottime = ktime_get_boot_ns();
*boottime = ktime_get_boottime_ns();
if (!ps_disabled) {
mvm->ps_disabled = ps_disabled;

View File

@@ -1399,7 +1399,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
*/
if (ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control)) {
rx_status.boottime_ns = ktime_get_boot_ns();
rx_status.boottime_ns = ktime_get_boottime_ns();
now = data->abs_bcn_ts;
} else {
now = mac80211_hwsim_get_tsf_raw();

View File

@@ -496,7 +496,7 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
}
/* update the host-chipset time offset */
wl->time_offset = (ktime_get_boot_ns() >> 10) -
wl->time_offset = (ktime_get_boottime_ns() >> 10) -
(s64)(status->fw_localtime);
wl->fw_fast_lnk_map = status->link_fast_bitmap;

View File

@@ -107,7 +107,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
if (beacon || probe_rsp)
status->boottime_ns = ktime_get_boot_ns();
status->boottime_ns = ktime_get_boottime_ns();
if (beacon)
wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,

View File

@@ -287,7 +287,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
/* configure packet life time */
hosttime = (ktime_get_boot_ns() >> 10);
hosttime = (ktime_get_boottime_ns() >> 10);
desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
is_dummy = wl12xx_is_dummy_packet(wl, skb);

View File

@@ -179,7 +179,7 @@ static void virt_wifi_scan_result(struct work_struct *work)
scan_result.work);
struct wiphy *wiphy = priv_to_wiphy(priv);
struct cfg80211_scan_info scan_info = { .aborted = false };
u64 tsf = div_u64(ktime_get_boot_ns(), 1000);
u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
CFG80211_BSS_FTYPE_PRESP,

View File

@@ -159,12 +159,12 @@ static inline u64 ktime_get_real_ns(void)
return ktime_to_ns(ktime_get_real());
}
static inline u64 ktime_get_boot_ns(void)
static inline u64 ktime_get_boottime_ns(void)
{
return ktime_to_ns(ktime_get_boottime());
}
static inline u64 ktime_get_tai_ns(void)
static inline u64 ktime_get_clocktai_ns(void)
{
return ktime_to_ns(ktime_get_clocktai());
}

View File

@@ -2021,7 +2021,7 @@ enum cfg80211_signal_type {
* received by the device (not just by the host, in case it was
* buffered on the device) and be accurate to about 10ms.
* If the frame isn't buffered, just passing the return value of
* ktime_get_boot_ns() is likely appropriate.
* ktime_get_boottime_ns() is likely appropriate.
* @parent_tsf: the time at the start of reception of the first octet of the
* timestamp field of the frame. The time is the TSF of the BSS specified
* by %parent_bssid.

View File

@@ -1682,7 +1682,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
if (err < 0)
goto free_prog;
prog->aux->load_time = ktime_get_boot_ns();
prog->aux->load_time = ktime_get_boottime_ns();
err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
if (err)
goto free_prog;

View File

@@ -10569,11 +10569,11 @@ static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
break;
case CLOCK_BOOTTIME:
event->clock = &ktime_get_boot_ns;
event->clock = &ktime_get_boottime_ns;
break;
case CLOCK_TAI:
event->clock = &ktime_get_tai_ns;
event->clock = &ktime_get_clocktai_ns;
break;
default:

View File

@@ -2114,7 +2114,7 @@ static __latent_entropy struct task_struct *copy_process(
*/
p->start_time = ktime_get_ns();
p->real_start_time = ktime_get_boot_ns();
p->real_start_time = ktime_get_boottime_ns();
/*
* Make it visible to the rest of the system, but dont wake it up yet.