/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

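/*
 * Keep the hardware page table walker (HTW), where present, pointed at
 * the pgd being installed.  The back-to-back hazard barrier ensures the
 * CP0 PWBase write has taken effect before any dependent instruction.
 */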
#define htw_set_pwbase(pgd)						\
do {									\
	if (cpu_has_htw) {						\
		write_c0_pwbase(pgd);					\
		back_to_back_c0_hazard();				\
	}								\
} while (0)

#define TLBMISS_HANDLER_SETUP_PGD(pgd)					\
do {									\
	extern void tlbmiss_handler_setup_pgd(unsigned long);		\
	tlbmiss_handler_setup_pgd((unsigned long)(pgd));		\
	htw_set_pwbase((unsigned long)(pgd));				\
} while (0)

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT

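/*
 * With CONFIG_MIPS_PGD_C0_CONTEXT the current pgd pointer is held in a
 * CP0 register by tlbmiss_handler_setup_pgd() (hence the config name),
 * so no per-CPU pgd_current[] array is needed; only the CPU number has
 * to be kept, in XContext.
 */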
#define TLBMISS_HANDLER_RESTORE()					\
	write_c0_xcontext((unsigned long) smp_processor_id() <<		\
			  SMP_CPUID_REGSHIFT)

#define TLBMISS_HANDLER_SETUP()						\
	do {								\
		TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);		\
		TLBMISS_HANDLER_RESTORE();				\
	} while (0)

#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current */

/*
 * For the fast TLB miss handlers, we keep a per-CPU array of pointers
 * to the current pgd for each processor.  The processor id is also
 * stuffed into the Context register so the refill handler can index
 * this array without first having to look up its own CPU number.
 */
extern unsigned long pgd_current[];
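/*
 * Illustrative sketch (not kernel API): the refill handler effectively
 * recovers the current pgd by pulling the CPU number back out of the
 * Context register, along the lines of
 *
 *	pgd = (pgd_t *)pgd_current[read_c0_context() >> SMP_CPUID_REGSHIFT];
 *
 * though the real handler is generated assembly, not C.
 */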

#define TLBMISS_HANDLER_RESTORE()					\
	write_c0_context((unsigned long) smp_processor_id() <<		\
			 SMP_CPUID_REGSHIFT)

#define TLBMISS_HANDLER_SETUP()						\
	do {								\
		TLBMISS_HANDLER_RESTORE();				\
		back_to_back_c0_hazard();				\
		TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);		\
	} while (0)
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define ASID_INC	0x40
#define ASID_MASK	0xfc0

#elif defined(CONFIG_CPU_R8000)

#define ASID_INC	0x10
#define ASID_MASK	0xff0

#else /* FIXME: not correct for R6000 */

#define ASID_INC	0x1
#define ASID_MASK	0xff

#endif

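/*
 * Per-CPU ASID bookkeeping: cpu_context() is the full (version | ASID)
 * word @mm last ran with on @cpu, cpu_asid() extracts just the hardware
 * ASID bits from it, and asid_cache() is the most recently allocated
 * value on @cpu.
 */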
#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * All upper bits unused by the hardware are treated as a software
 * ASID version extension.
 */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
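
/*
 * Worked example for the generic case (ASID_MASK == 0xff):
 * ASID_VERSION_MASK is ~0xff, so everything above bit 7 counts as the
 * version, and ASID_FIRST_VERSION is 0x100 -- the first value with a
 * non-zero version field, so a freshly zeroed cpu_context() can never
 * match a live asid_cache().
 */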

/* Normal, classic MIPS get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	extern void kvm_local_flush_tlb_all(void);
	unsigned long asid = asid_cache(cpu);

	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
#ifdef CONFIG_KVM
		kvm_local_flush_tlb_all();      /* start new asid cycle */
#else
		local_flush_tlb_all();	/* start new asid cycle */
#endif
		if (!asid)		/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
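
/*
 * Hedged usage sketch (the real caller is typically switch_mm(), later
 * in this file): a version mismatch between the mm's recorded context
 * and this CPU's asid_cache means the ASID is stale and must be
 * reallocated:
 *
 *	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
 *		get_new_mmu_context(next, cpu);
 */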

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_possible_cpu(i)
		cpu_context(i, mm) = 0;

	atomic_set(&mm->context.fp_mode_switching, 0);

	return 0;
}

static inline