/*
 * Linux OS Independent Layer
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linux_osl.c 815919 2019-04-22 09:06:50Z $
 */

#define LINUX_PORT

#include <typedefs.h>
#include <bcmendian.h>
#include <linuxver.h>
#include <bcmdefs.h>

#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
#include <asm/cacheflush.h>
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */

#include <linux/random.h>

#include <osl.h>
#include <bcmutils.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <pcicfg.h>
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0))
#include <asm-generic/pci-dma-compat.h>
#endif

#ifdef BCM_SECURE_DMA
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <linux/skbuff.h>
#include <stbutils.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#endif /* BCM_SECURE_DMA */

#include <linux/fs.h>

#if defined(STB)
#include <linux/spinlock.h>
extern spinlock_t l2x0_reg_lock;
#endif // endif

#ifdef BCM_OBJECT_TRACE
#include <bcmutils.h>
#endif /* BCM_OBJECT_TRACE */
#include "linux_osl_priv.h"

#define PCI_CFG_RETRY		10

#define DUMPBUFSZ 1024

#ifdef BCM_SECURE_DMA
static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
	bool iscache, bool isdecr);
static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
	sec_mem_elem_t **list);
static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
	void *sec_list_base);
static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
	int direction, struct sec_cma_info *ptr_cma_info, uint offset);
static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
static void osl_sec_dma_init_consistent(osl_t *osh);
static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
	ulong *pap);
static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
#endif /* BCM_SECURE_DMA */

/* PCMCIA attribute space access macros */

uint32 g_assert_type = 0; /* By Default Kernel Panic */

module_param(g_assert_type, int, 0);
#ifdef	BCM_SECURE_DMA
#define	SECDMA_MODULE_PARAMS	0
#define	SECDMA_EXT_FILE	1
unsigned long secdma_addr = 0;
unsigned long secdma_addr2 = 0;
u32 secdma_size = 0;
u32 secdma_size2 = 0;
module_param(secdma_addr, ulong, 0);
module_param(secdma_size, int, 0);
module_param(secdma_addr2, ulong, 0);
module_param(secdma_size2, int, 0);
static int secdma_found = 0;
#endif /* BCM_SECURE_DMA */

#ifdef USE_DMA_LOCK
static void osl_dma_lock(osl_t *osh);
static void osl_dma_unlock(osl_t *osh);
static void osl_dma_lock_init(osl_t *osh);

#define DMA_LOCK(osh)		osl_dma_lock(osh)
#define DMA_UNLOCK(osh)		osl_dma_unlock(osh)
#define DMA_LOCK_INIT(osh)	osl_dma_lock_init(osh);
#else
#define DMA_LOCK(osh)		do { /* noop */ } while(0)
#define DMA_UNLOCK(osh)		do { /* noop */ } while(0)
#define DMA_LOCK_INIT(osh)	do { /* noop */ } while(0)
#endif /* USE_DMA_LOCK */

static int16 linuxbcmerrormap[] =
{	0,			/* 0 */
	-EINVAL,		/* BCME_ERROR */
	-EINVAL,		/* BCME_BADARG */
	-EINVAL,		/* BCME_BADOPTION */
	-EINVAL,		/* BCME_NOTUP */
	-EINVAL,		/* BCME_NOTDOWN */
	-EINVAL,		/* BCME_NOTAP */
	-EINVAL,		/* BCME_NOTSTA */
	-EINVAL,		/* BCME_BADKEYIDX */
	-EINVAL,		/* BCME_RADIOOFF */
	-EINVAL,		/* BCME_NOTBANDLOCKED */
	-EINVAL,		/* BCME_NOCLK */
	-EINVAL,		/* BCME_BADRATESET */
	-EINVAL,		/* BCME_BADBAND */
	-E2BIG,			/* BCME_BUFTOOSHORT */
	-E2BIG,			/* BCME_BUFTOOLONG */
	-EBUSY,			/* BCME_BUSY */
	-EINVAL,		/* BCME_NOTASSOCIATED */
	-EINVAL,		/* BCME_BADSSIDLEN */
	-EINVAL,		/* BCME_OUTOFRANGECHAN */
	-EINVAL,		/* BCME_BADCHAN */
	-EFAULT,		/* BCME_BADADDR */
	-ENOMEM,		/* BCME_NORESOURCE */
	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
	-EMSGSIZE,		/* BCME_BADLENGTH */
	-EINVAL,		/* BCME_NOTREADY */
	-EPERM,			/* BCME_EPERM */
	-ENOMEM,		/* BCME_NOMEM */
	-EINVAL,		/* BCME_ASSOCIATED */
	-ERANGE,		/* BCME_RANGE */
	-EINVAL,		/* BCME_NOTFOUND */
	-EINVAL,		/* BCME_WME_NOT_ENABLED */
	-EINVAL,		/* BCME_TSPEC_NOTFOUND */
	-EINVAL,		/* BCME_ACM_NOTSUPPORTED */
	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
	-EIO,			/* BCME_SDIO_ERROR */
	-ENODEV,		/* BCME_DONGLE_DOWN */
	-EINVAL,		/* BCME_VERSION */
	-EIO,			/* BCME_TXFAIL */
	-EIO,			/* BCME_RXFAIL */
	-ENODEV,		/* BCME_NODEVICE */
	-EINVAL,		/* BCME_NMODE_DISABLED */
	-ENODATA,		/* BCME_NONRESIDENT */
	-EINVAL,		/* BCME_SCANREJECT */
	-EINVAL,		/* BCME_USAGE_ERROR */
	-EIO,			/* BCME_IOCTL_ERROR */
	-EIO,			/* BCME_SERIAL_PORT_ERR */
	-EOPNOTSUPP,		/* BCME_DISABLED, BCME_NOTENABLED */
	-EIO,			/* BCME_DECERR */
	-EIO,			/* BCME_ENCERR */
	-EIO,			/* BCME_MICERR */
	-ERANGE,		/* BCME_REPLAY */
	-EINVAL,		/* BCME_IE_NOTFOUND */
	-EINVAL,		/* BCME_DATA_NOTFOUND */
	-EINVAL,		/* BCME_NOT_GC */
	-EINVAL,		/* BCME_PRS_REQ_FAILED */
	-EINVAL,		/* BCME_NO_P2P_SE */
	-EINVAL,		/* BCME_NOA_PND */
	-EINVAL,		/* BCME_FRAG_Q_FAILED */
	-EINVAL,		/* BCME_GET_AF_FAILED */
	-EINVAL,		/* BCME_MSCH_NOTREADY */
	-EINVAL,		/* BCME_IOV_LAST_CMD */
	-EINVAL,		/* BCME_MINIPMU_CAL_FAIL */
	-EINVAL,		/* BCME_RCAL_FAIL */
	-EINVAL,		/* BCME_LPF_RCCAL_FAIL */
	-EINVAL,		/* BCME_DACBUF_RCCAL_FAIL */
	-EINVAL,		/* BCME_VCOCAL_FAIL */
	-EINVAL,		/* BCME_BANDLOCKED */
	-EINVAL,		/* BCME_DNGL_DEVRESET */

/* When a new error code is added to bcmutils.h, add the OS-specific
 * error translation here as well.
 */
/* check if BCME_LAST changed since the last time this function was updated */
#if BCME_LAST != -68
#error "You need to add an OS error translation in the linuxbcmerrormap \
	for new error code defined in bcmutils.h"
#endif // endif
};
uint lmtest = FALSE;

#ifdef DHD_MAP_LOGGING
#define DHD_MAP_LOG_SIZE 2048

typedef struct dhd_map_item {
	dmaaddr_t pa;		/* DMA address (physical) */
	uint64 ts_nsec;		/* timestamp: nsec */
	uint32 size;		/* mapping size */
	uint8 rsvd[4];		/* reserved for future use */
} dhd_map_item_t;

typedef struct dhd_map_record {
	uint32 items;		/* number of total items */
	uint32 idx;		/* current index of metadata */
	dhd_map_item_t map[0];	/* metadata storage */
} dhd_map_log_t;

void
osl_dma_map_dump(osl_t *osh)
{
	dhd_map_log_t *map_log, *unmap_log;
	uint64 ts_sec, ts_usec;

	map_log = (dhd_map_log_t *)(osh->dhd_map_log);
	unmap_log = (dhd_map_log_t *)(osh->dhd_unmap_log);
	osl_get_localtime(&ts_sec, &ts_usec);

	if (map_log && unmap_log) {
		printk("%s: map_idx=%d unmap_idx=%d "
			"current time=[%5lu.%06lu]\n", __FUNCTION__,
			map_log->idx, unmap_log->idx, (unsigned long)ts_sec,
			(unsigned long)ts_usec);
		printk("%s: dhd_map_log(pa)=0x%llx size=%d,"
			" dma_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
			(uint64)__virt_to_phys((ulong)(map_log->map)),
			(uint32)(sizeof(dhd_map_item_t) * map_log->items),
			(uint64)__virt_to_phys((ulong)(unmap_log->map)),
			(uint32)(sizeof(dhd_map_item_t) * unmap_log->items));
	}
}

static void *
osl_dma_map_log_init(uint32 item_len)
{
	dhd_map_log_t *map_log;
	gfp_t flags;
	uint32 alloc_size = (uint32)(sizeof(dhd_map_log_t) +
		(item_len * sizeof(dhd_map_item_t)));

	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
	map_log = (dhd_map_log_t *)kmalloc(alloc_size, flags);
	if (map_log) {
		memset(map_log, 0, alloc_size);
		map_log->items = item_len;
		map_log->idx = 0;
	}

	return (void *)map_log;
}

static void
osl_dma_map_log_deinit(osl_t *osh)
{
	if (osh->dhd_map_log) {
		kfree(osh->dhd_map_log);
		osh->dhd_map_log = NULL;
	}

	if (osh->dhd_unmap_log) {
		kfree(osh->dhd_unmap_log);
		osh->dhd_unmap_log = NULL;
	}
}

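/* Record one map/unmap event in the given log's circular buffer. 'handle'
 * is osh->dhd_map_log or osh->dhd_unmap_log; when the buffer fills, the
 * index wraps and the oldest entries are overwritten.
 */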
static void
osl_dma_map_logging(osl_t *osh, void *handle, dmaaddr_t pa, uint32 len)
{
	dhd_map_log_t *log = (dhd_map_log_t *)handle;
	uint32 idx;

	if (log == NULL) {
		printk("%s: log is NULL\n", __FUNCTION__);
		return;
	}

	idx = log->idx;
	log->map[idx].ts_nsec = osl_localtime_ns();
	log->map[idx].pa = pa;
	log->map[idx].size = len;
	log->idx = (idx + 1) % log->items;
}
#endif /* DHD_MAP_LOGGING */

/* translate bcmerrors into linux errors */
int
osl_error(int bcmerror)
{
	if (bcmerror > 0)
		bcmerror = 0;
	else if (bcmerror < BCME_LAST)
		bcmerror = BCME_ERROR;

	/* Array bounds covered by ASSERT in osl_attach */
	return linuxbcmerrormap[-bcmerror];
}
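
/* Allocate and initialize the OS abstraction handle (osl_t) for a device.
 * 'pdev' is the bus device (e.g. a struct pci_dev *), 'bustype' selects
 * memory-mapped vs. indirect register access, and 'pkttag' enables packet
 * tagging. Returns NULL on allocation failure. A minimal usage sketch
 * (hypothetical caller, e.g. a PCI probe routine):
 *
 *	osl_t *osh = osl_attach(pdev, PCI_BUS, FALSE);
 *	if (osh == NULL)
 *		return -ENOMEM;
 *	...
 *	osl_detach(osh);
 */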
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
{
	void **osl_cmn = NULL;
	osl_t *osh;
	gfp_t flags;
#ifdef BCM_SECURE_DMA
	u32 secdma_memsize;
#endif // endif

	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
	if (!(osh = kmalloc(sizeof(osl_t), flags)))
		return osh;

	ASSERT(osh);

	bzero(osh, sizeof(osl_t));

	if (osl_cmn == NULL || *osl_cmn == NULL) {
		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
			kfree(osh);
			return NULL;
		}
		bzero(osh->cmn, sizeof(osl_cmn_t));
		if (osl_cmn)
			*osl_cmn = osh->cmn;
		atomic_set(&osh->cmn->malloced, 0);
		osh->cmn->dbgmem_list = NULL;
		spin_lock_init(&(osh->cmn->dbgmem_lock));

		spin_lock_init(&(osh->cmn->pktalloc_lock));

	} else {
		osh->cmn = *osl_cmn;
	}
	atomic_add(1, &osh->cmn->refcount);

	bcm_object_trace_init();

	/* Check that error map has the right number of entries in it */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->failed = 0;
	osh->pdev = pdev;
	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;
	osh->magic = OS_HANDLE_MAGIC;
#ifdef BCM_SECURE_DMA

	if ((secdma_addr != 0) && (secdma_size != 0)) {
		printk("linux_osl.c: Buffer info passed via module params, using it.\n");
		if (secdma_found == 0) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr;
			secdma_memsize = secdma_size;
		} else if (secdma_found == 1) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
			secdma_memsize = secdma_size2;
		} else {
			printk("linux_osl.c secdma: secDMA instances %d\n", secdma_found);
			kfree(osh);
			return NULL;
		}
		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x\n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_MODULE_PARAMS;
	}
	else if (stbpriv_init(osh) == 0) {
		printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
		if (secdma_found == 0) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
		} else if (secdma_found == 1) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);
		} else {
			printk("linux_osl.c secdma: secDMA instances %d\n", secdma_found);
			kfree(osh);
			return NULL;
		}
		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x\n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_EXT_FILE;
	}
	else {
		printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");
		kfree(osh);
		return NULL;
	}
	secdma_found++;
	osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc),
		CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);

	if (osh->contig_base_alloc_coherent_va == NULL) {
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
	osh->contig_base_alloc_coherent = osh->contig_base_alloc;
	osl_sec_dma_init_consistent(osh);

	osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;

	osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
	if (osh->contig_base_alloc_va == NULL) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->contig_base_va = osh->contig_base_alloc_va;

#ifdef NOT_YET
	/*
	* osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512);
	* osh->sec_list_base_512 = osh->sec_list_512;
	* osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048);
	* osh->sec_list_base_2048 = osh->sec_list_2048;
	*/
#endif // endif
	if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
		CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
		osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->sec_list_base_4096 = osh->sec_list_4096;

#endif /* BCM_SECURE_DMA */

	switch (bustype) {
		case PCI_BUS:
		case SI_BUS:
		case PCMCIA_BUS:
			osh->pub.mmbus = TRUE;
			break;
		case JTAG_BUS:
		case SDIO_BUS:
		case USB_BUS:
		case SPI_BUS:
		case RPC_BUS:
			osh->pub.mmbus = FALSE;
			break;
		default:
			ASSERT(FALSE);
			break;
	}

	DMA_LOCK_INIT(osh);

#ifdef DHD_MAP_LOGGING
	osh->dhd_map_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
	if (osh->dhd_map_log == NULL) {
		printk("%s: Failed to alloc dhd_map_log\n", __FUNCTION__);
	}

	osh->dhd_unmap_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
	if (osh->dhd_unmap_log == NULL) {
		printk("%s: Failed to alloc dhd_unmap_log\n", __FUNCTION__);
	}
#endif /* DHD_MAP_LOGGING */

	return osh;
}

void osl_set_bus_handle(osl_t *osh, void *bus_handle)
{
	osh->bus_handle = bus_handle;
}

void* osl_get_bus_handle(osl_t *osh)
{
	return osh->bus_handle;
}

#if defined(BCM_BACKPLANE_TIMEOUT)
void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx)
{
	if (osh) {
		osh->bpt_cb = (bpt_cb_fn)bpt_cb;
		osh->sih = bpt_ctx;
	}
}
#endif	/* BCM_BACKPLANE_TIMEOUT */

void
osl_detach(osl_t *osh)
{
	if (osh == NULL)
		return;

#ifdef BCM_SECURE_DMA
	if (osh->stb_ext_params == SECDMA_EXT_FILE)
		stbpriv_exit(osh);
#ifdef NOT_YET
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512);
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048);
#endif /* NOT_YET */
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
	osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
	osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
	secdma_found--;
#endif /* BCM_SECURE_DMA */

	bcm_object_trace_deinit();

#ifdef DHD_MAP_LOGGING
	/* takes the osl handle and frees both the map and unmap logs */
	osl_dma_map_log_deinit(osh);
#endif /* DHD_MAP_LOGGING */

	ASSERT(osh->magic == OS_HANDLE_MAGIC);
	atomic_sub(1, &osh->cmn->refcount);
	if (atomic_read(&osh->cmn->refcount) == 0) {
		kfree(osh->cmn);
	}
	kfree(osh);
}

/* APIs to set/get specific quirks in OSL layer */
void BCMFASTPATH
osl_flag_set(osl_t *osh, uint32 mask)
{
	osh->flags |= mask;
}

void
osl_flag_clr(osl_t *osh, uint32 mask)
{
	osh->flags &= ~mask;
}

#if defined(STB)
inline bool BCMFASTPATH
#else
bool
#endif // endif
osl_is_flag_set(osl_t *osh, uint32 mask)
{
	return (osh->flags & mask);
}

#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
	defined(STB_SOC_WIFI)

inline int BCMFASTPATH
osl_arch_is_coherent(void)
{
	return 0;
}

inline int BCMFASTPATH
osl_acp_war_enab(void)
{
	return 0;
}

inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{
	if (size > 0)
#ifdef STB_SOC_WIFI
		dma_sync_single_for_device(OSH_NULL, virt_to_phys(va), size, DMA_TX);
#else /* STB_SOC_WIFI */
		dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size,
			DMA_TO_DEVICE);
#endif /* STB_SOC_WIFI */
}

inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{
#ifdef STB_SOC_WIFI
	dma_sync_single_for_cpu(OSH_NULL, virt_to_phys(va), size, DMA_RX);
#else /* STB_SOC_WIFI */
	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
#endif /* STB_SOC_WIFI */
}

inline void BCMFASTPATH
osl_prefetch(const void *ptr)
{
#if !defined(STB_SOC_WIFI)
	__asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
#endif // endif
}

#endif // endif

uint32
osl_pci_read_config(osl_t *osh, uint offset, uint size)
{
	uint val = 0;
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4-byte access supported */
	ASSERT(size == 4);

	do {
		pci_read_config_dword(osh->pdev, offset, &val);
		if (val != 0xffffffff)
			break;
	} while (retry--);

	return (val);
}

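/* Write a dword to PCI config space. Writes to PCI_BAR0_WIN are verified by
 * reading the value back and retried up to PCI_CFG_RETRY times; all other
 * offsets are written once.
 */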
void
osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
{
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4-byte access supported */
	ASSERT(size == 4);

	do {
		pci_write_config_dword(osh->pdev, offset, val);
		if (offset != PCI_BAR0_WIN)
			break;
		if (osl_pci_read_config(osh, offset, size) == val)
			break;
	} while (retry--);
}

/* return bus # for the pci device pointed to by osh->pdev */
uint
osl_pci_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__)
	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
	return ((struct pci_dev *)osh->pdev)->bus->number;
#endif // endif
}

/* return slot # for the pci device pointed to by osh->pdev */
uint
osl_pci_slot(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__)
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
#endif // endif
}

/* return domain # for the pci device pointed to by osh->pdev */
uint
osl_pcie_domain(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
}

/* return bus # for the pci device pointed to by osh->pdev */
uint
osl_pcie_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return ((struct pci_dev *)osh->pdev)->bus->number;
}

/* return the pci device pointed to by osh->pdev */
struct pci_dev *
osl_pci_device(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return osh->pdev;
}

/* PCMCIA attribute space access is not implemented; these are stubs */
static void
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
{
}

void
osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
}

void
osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
}

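/* General-purpose allocator. With CONFIG_DHD_USE_STATIC_BUF, requests of
 * PAGE_SIZE..STATIC_BUF_SIZE bytes are served first from a pre-reserved
 * static pool; otherwise (or when the pool is exhausted) it falls back to
 * kmalloc(). The running total is tracked in osh->cmn->malloced so leaks
 * can be reported by osl_check_memleak().
 */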
void *
osl_malloc(osl_t *osh, uint size)
{
	void *addr;
	gfp_t flags;

	/* only ASSERT if osh is defined */
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		unsigned long irq_flags;
		int i = 0;
		if ((size >= PAGE_SIZE) && (size <= STATIC_BUF_SIZE))
		{
			spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);

			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
			{
				if (bcm_static_buf->buf_use[i] == 0)
					break;
			}

			if (i == STATIC_BUF_MAX_NUM)
			{
				spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
				printk("all static buff in use!\n");
				goto original;
			}

			bcm_static_buf->buf_use[i] = 1;
			spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);

			bzero(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE * i, size);
			if (osh)
				atomic_add(size, &osh->cmn->malloced);

			return ((void *)(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE * i));
		}
	}
original:
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
	if ((addr = kmalloc(size, flags)) == NULL) {
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh && osh->cmn)
		atomic_add(size, &osh->cmn->malloced);

	return (addr);
}

void *
osl_mallocz(osl_t *osh, uint size)
{
	void *ptr;

	ptr = osl_malloc(osh, size);

	if (ptr != NULL) {
		bzero(ptr, size);
	}

	return ptr;
}

void
osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	unsigned long flags;

	if (bcm_static_buf)
	{
		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
		{
			int buf_idx = 0;

			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr) / STATIC_BUF_SIZE;

			spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
			bcm_static_buf->buf_use[buf_idx] = 0;
			spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);

			if (osh && osh->cmn) {
				ASSERT(osh->magic == OS_HANDLE_MAGIC);
				atomic_sub(size, &osh->cmn->malloced);
			}
			return;
		}
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);
	}
	kfree(addr);
}

void *
osl_vmalloc(osl_t *osh, uint size)
{
	void *addr;

	/* only ASSERT if osh is defined */
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
	if ((addr = vmalloc(size)) == NULL) {
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh && osh->cmn)
		atomic_add(size, &osh->cmn->malloced);

	return (addr);
}

void *
osl_vmallocz(osl_t *osh, uint size)
{
	void *ptr;

	ptr = osl_vmalloc(osh, size);

	if (ptr != NULL) {
		bzero(ptr, size);
	}

	return ptr;
}

void
osl_vmfree(osl_t *osh, void *addr, uint size)
{
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);
	}
	vfree(addr);
}

uint
osl_check_memleak(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	if (atomic_read(&osh->cmn->refcount) == 1)
		return (atomic_read(&osh->cmn->malloced));
	else
		return 0;
}

uint
osl_malloced(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (atomic_read(&osh->cmn->malloced));
}

uint
osl_malloc_failed(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (osh->failed);
}

uint
osl_dma_consistent_align(void)
{
	return (PAGE_SIZE);
}

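/* Allocate DMA-coherent memory. If DMA_CONSISTENT_ALIGN does not already
 * satisfy the requested (1 << align_bits) alignment, the size is padded by
 * one alignment unit and the padded size is returned through 'alloced';
 * callers must pass that value back to osl_dma_free_consistent(). A short
 * usage sketch (hypothetical sizes):
 *
 *	uint alloced;
 *	dmaaddr_t pa;
 *	void *va = osl_dma_alloc_consistent(osh, 2048, 12, &alloced, &pa);
 *	if (va != NULL) {
 *		... program the device with pa, access the buffer via va ...
 *		osl_dma_free_consistent(osh, va, alloced, pa);
 *	}
 */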
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
{
	void *va;
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

#ifndef	BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
	defined(STB_SOC_WIFI)
	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
	if (va)
		*pap = (ulong)__virt_to_phys((ulong)va);
#else
	{
		dma_addr_t pap_lin;
		struct pci_dev *hwdev = osh->pdev;
		gfp_t flags;
#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
		flags = GFP_ATOMIC;
#else
		flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
		va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
#ifdef BCMDMA64OSL
		PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
		PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
#else
		*pap = (dmaaddr_t)pap_lin;
#endif /* BCMDMA64OSL */
	}
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
#endif /* BCM_SECURE_DMA */
	return va;
}

void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
#ifdef BCMDMA64OSL
	dma_addr_t paddr;
#endif /* BCMDMA64OSL */
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifndef BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
	defined(STB_SOC_WIFI)
	kfree(va);
#else
#ifdef BCMDMA64OSL
	PHYSADDRTOULONG(pa, paddr);
	pci_free_consistent(osh->pdev, size, va, paddr);
#else
	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif /* BCMDMA64OSL */
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	osl_sec_dma_free_consistent(osh, va, size, pa);
#endif /* BCM_SECURE_DMA */
}

void *
osl_virt_to_phys(void *va)
{
	return (void *)(uintptr)virt_to_phys(va);
}

#include <asm/cacheflush.h>
void BCMFASTPATH
osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	return;
}

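/* Map a buffer for streaming DMA and return its bus address as a dmaaddr_t
 * (split into low/high 32-bit halves). On mapping failure the returned
 * address is zero, which callers must check. When DHD_MAP_LOGGING is
 * enabled, each successful mapping is also recorded for post-mortem
 * debugging.
 */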
dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	int dir;
	dmaaddr_t ret_addr;
	dma_addr_t map_addr;
	int ret;

	DMA_LOCK(osh);

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
	/* need to flush or invalidate the cache here */
	if (dir == DMA_TX) { /* to device */
		osl_cache_flush(va, size);
	} else if (dir == DMA_RX) { /* from device */
		osl_cache_inv(va, size);
	} else { /* both */
		osl_cache_flush(va, size);
		osl_cache_inv(va, size);
	}
	DMA_UNLOCK(osh);
	return virt_to_phys(va);
#else /* (__LINUX_ARM_ARCH__ == 8) */
	map_addr = dma_map_single(osh->pdev, va, size, dir);
	DMA_UNLOCK(osh);
	return map_addr;
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* ! STB_SOC_WIFI */
	map_addr = pci_map_single(osh->pdev, va, size, dir);
#endif	/* ! STB_SOC_WIFI */

	ret = pci_dma_mapping_error(osh->pdev, map_addr);

	if (ret) {
		printk("%s: Failed to map memory\n", __FUNCTION__);
		PHYSADDRLOSET(ret_addr, 0);
		PHYSADDRHISET(ret_addr, 0);
	} else {
		PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
		PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
	}

#ifdef DHD_MAP_LOGGING
	osl_dma_map_logging(osh, osh->dhd_map_log, ret_addr, size);
#endif /* DHD_MAP_LOGGING */

	DMA_UNLOCK(osh);

	return ret_addr;
}

void BCMFASTPATH
osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction)
{
	int dir;
#ifdef BCMDMA64OSL
	dma_addr_t paddr;
#endif /* BCMDMA64OSL */

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	DMA_LOCK(osh);

	dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

#ifdef DHD_MAP_LOGGING
	osl_dma_map_logging(osh, osh->dhd_unmap_log, pa, size);
#endif /* DHD_MAP_LOGGING */

#ifdef BCMDMA64OSL
	PHYSADDRTOULONG(pa, paddr);
	pci_unmap_single(osh->pdev, paddr, size, dir);
#else /* BCMDMA64OSL */

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
	if (dir == DMA_TX) { /* to device */
		dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
	} else if (dir == DMA_RX) { /* from device */
		dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
	} else { /* both */
		dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
		dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
	}
#else /* (__LINUX_ARM_ARCH__ == 8) */
	dma_unmap_single(osh->pdev, (uintptr)pa, size, dir);
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* STB_SOC_WIFI */
	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
#endif /* STB_SOC_WIFI */

#endif /* BCMDMA64OSL */

	DMA_UNLOCK(osh);
}

/* OSL function for CPU relax */
inline void BCMFASTPATH
osl_cpu_relax(void)
{
	cpu_relax();
}

extern void osl_preempt_disable(osl_t *osh)
{
	preempt_disable();
}

extern void osl_preempt_enable(osl_t *osh)
{
	preempt_enable();
}

#if defined(BCMASSERT_LOG)
void
osl_assert(const char *exp, const char *file, int line)
{
	char tempbuf[256];
	const char *basename;

	basename = strrchr(file, '/');
	/* skip the '/' */
	if (basename)
		basename++;

	if (!basename)
		basename = file;

#ifdef BCMASSERT_LOG
	snprintf(tempbuf, sizeof(tempbuf), "\"%s\": file \"%s\", line %d\n",
		exp, basename, line);
#endif /* BCMASSERT_LOG */

	switch (g_assert_type) {
	case 0:
		panic("%s", tempbuf);
		break;
	case 1:
		/* fall through */
	case 3:
		printk("%s", tempbuf);
		break;
	case 2:
		printk("%s", tempbuf);
		BUG();
		break;
	default:
		break;
	}
}
#endif // endif

void
osl_delay(uint usec)
{
	uint d;

	while (usec > 0) {
		d = MIN(usec, 1000);
		udelay(d);
		usec -= d;
	}
}

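/* Sleep for 'ms' milliseconds. For short waits (< 20 ms) usleep_range() is
 * used, since msleep() rounds up to jiffies and can oversleep considerably
 * for small values; longer waits use msleep().
 */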
void
osl_sleep(uint ms)
{
	if (ms < 20)
		usleep_range(ms * 1000, ms * 1000 + 1000);
	else
		msleep(ms);
}

uint64
osl_sysuptime_us(void)
{
	struct osl_timespec tv;
	uint64 usec;

	osl_do_gettimeofday(&tv);
	/* tv_usec content is fraction of a second */
	usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
	return usec;
}

uint64
osl_localtime_ns(void)
{
	uint64 ts_nsec = 0;

	ts_nsec = local_clock();

	return ts_nsec;
}

void
osl_get_localtime(uint64 *sec, uint64 *usec)
{
	uint64 ts_nsec = 0;
	unsigned long rem_nsec = 0;

	ts_nsec = local_clock();
	rem_nsec = do_div(ts_nsec, NSEC_PER_SEC);
	*sec = (uint64)ts_nsec;
	*usec = (uint64)(rem_nsec / MSEC_PER_SEC);
}

uint64
osl_systztime_us(void)
{
	struct osl_timespec tv;
	uint64 tzusec;

	osl_do_gettimeofday(&tv);
	/* apply timezone */
	tzusec = (uint64)((tv.tv_sec - (sys_tz.tz_minuteswest * 60)) *
		USEC_PER_SEC);
	tzusec += tv.tv_usec;

	return tzusec;
}

/*
 * OSLREGOPS specifies that osl_XXX routines are to be used for register access
 */

/*
 * BINOSL selects the slightly slower function-call-based binary compatible osl.
 */

uint32
osl_rand(void)
{
	uint32 rand;

	get_random_bytes(&rand, sizeof(rand));

	return rand;
}

/* Linux Kernel: File Operations: start */
void *
osl_os_open_image(char *filename)
{
	struct file *fp;

	fp = filp_open(filename, O_RDONLY, 0);
	/*
	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
	 * Alternative:
	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
	 * ???
	 */
	if (IS_ERR(fp))
		fp = NULL;

	return fp;
}

int
osl_os_get_image_block(char *buf, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rdlen;

	if (!image)
		return 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
	/* kernel_read() takes a position pointer on >= 4.14 and advances
	 * f_pos itself; advancing it again here would skip data.
	 */
	rdlen = kernel_read(fp, buf, len, &fp->f_pos);
#else
	rdlen = kernel_read(fp, fp->f_pos, buf, len);
	if (rdlen > 0)
		fp->f_pos += rdlen;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */

	return rdlen;
}

void
osl_os_close_image(void *image)
{
	if (image)
		filp_close((struct file *)image, NULL);
}

int
osl_os_image_size(void *image)
{
	int len = 0, curroffset;

	if (image) {
		/* store the current offset */
		curroffset = generic_file_llseek(image, 0, SEEK_CUR);
		/* goto end of file to get length */
		len = generic_file_llseek(image, 0, SEEK_END);
		/* restore back the offset */
		generic_file_llseek(image, curroffset, SEEK_SET);
	}
	return len;
}

/* Linux Kernel: File Operations: end */

#if (defined(STB) && defined(__arm__))
inline void osl_pcie_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
	unsigned long flags = 0;
	int pci_access = 0;
	int acp_war_enab = ACP_WAR_ENAB();

	if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
		pci_access = 1;

	if (pci_access && acp_war_enab)
		spin_lock_irqsave(&l2x0_reg_lock, flags);

	switch (size) {
	case sizeof(uint8):
		*(volatile uint8*)v = readb((volatile uint8*)(addr));
		break;
	case sizeof(uint16):
		*(volatile uint16*)v = readw((volatile uint16*)(addr));
		break;
	case sizeof(uint32):
		*(volatile uint32*)v = readl((volatile uint32*)(addr));
		break;
	case sizeof(uint64):
		*(volatile uint64*)v = *((volatile uint64*)(addr));
		break;
	}

	if (pci_access && acp_war_enab)
		spin_unlock_irqrestore(&l2x0_reg_lock, flags);
}
#endif // endif

#if defined(BCM_BACKPLANE_TIMEOUT)
inline void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
	bool poll_timeout = FALSE;
	static int in_si_clear = FALSE;

	switch (size) {
	case sizeof(uint8):
		*(volatile uint8*)v = readb((volatile uint8*)(addr));
		if (*(volatile uint8*)v == 0xff)
			poll_timeout = TRUE;
		break;
	case sizeof(uint16):
		*(volatile uint16*)v = readw((volatile uint16*)(addr));
		if (*(volatile uint16*)v == 0xffff)
			poll_timeout = TRUE;
		break;
	case sizeof(uint32):
		*(volatile uint32*)v = readl((volatile uint32*)(addr));
		if (*(volatile uint32*)v == 0xffffffff)
			poll_timeout = TRUE;
		break;
	case sizeof(uint64):
		*(volatile uint64*)v = *((volatile uint64*)(addr));
		if (*(volatile uint64*)v == 0xffffffffffffffffULL)
			poll_timeout = TRUE;
		break;
	}

	if (osh && osh->sih && (in_si_clear == FALSE) && poll_timeout && osh->bpt_cb) {
		in_si_clear = TRUE;
		osh->bpt_cb((void *)osh->sih, (void *)addr);
		in_si_clear = FALSE;
	}
}
#endif /* BCM_BACKPLANE_TIMEOUT */

#ifdef BCM_SECURE_DMA
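/* Map 'size' bytes of physically contiguous (CMA) pages into kernel virtual
 * address space with vmap(). 'iscache' selects a cacheable vs. non-cacheable
 * mapping; 'isdecr' records the VA-to-PA delta in osh->contig_delta_va_pa so
 * later calls can translate secure-DMA virtual addresses back to physical.
 */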
static void *
osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
{
	struct page **map;
	int order, i;
	void *addr = NULL;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);
	if (map == NULL)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		map[i] = page + i;

	if (iscache) {
		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
		if (isdecr) {
			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
		}
	} else {
#if defined(__ARM_ARCH_7A__)
		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
			pgprot_noncached(__pgprot(PAGE_KERNEL)));
#endif // endif
		if (isdecr) {
			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
		}
	}

	kfree(map);
	return (void *)addr;
}

static void
osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
{
	vunmap(contig_base_va);
}

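/* Carve 'max' fixed-size blocks of 'mbsize' bytes out of the secure-DMA
 * region and thread them onto a singly linked free list. Each element
 * caches its physical address, kernel VA, and backing page so the map and
 * unmap paths never have to recompute them.
 */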
static int
osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
{
	int i;
	int ret = BCME_OK;
	sec_mem_elem_t *sec_mem_elem;

	if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t) * (max), GFP_ATOMIC)) != NULL) {

		*list = sec_mem_elem;
		bzero(sec_mem_elem, sizeof(sec_mem_elem_t) * (max));
		for (i = 0; i < max - 1; i++) {
			sec_mem_elem->next = (sec_mem_elem + 1);
			sec_mem_elem->size = mbsize;
			sec_mem_elem->pa_cma = osh->contig_base_alloc;
			sec_mem_elem->vac = osh->contig_base_alloc_va;

			sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
			osh->contig_base_alloc += mbsize;
			osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);

			sec_mem_elem = sec_mem_elem + 1;
		}
		sec_mem_elem->next = NULL;
		sec_mem_elem->size = mbsize;
		sec_mem_elem->pa_cma = osh->contig_base_alloc;
		sec_mem_elem->vac = osh->contig_base_alloc_va;

		sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
		osh->contig_base_alloc += mbsize;
		osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);

	} else {
		printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
		ret = BCME_ERROR;
	}
	return ret;
}

static void
osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
{
	if (sec_list_base)
		kfree(sec_list_base);
}

static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
	struct sec_cma_info *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem = NULL;

#ifdef NOT_YET
	if (size <= 512 && osh->sec_list_512) {
		sec_mem_elem = osh->sec_list_512;
		osh->sec_list_512 = sec_mem_elem->next;
	}
	else if (size <= 2048 && osh->sec_list_2048) {
		sec_mem_elem = osh->sec_list_2048;
		osh->sec_list_2048 = sec_mem_elem->next;
	}
	else
#else
		ASSERT(osh->sec_list_4096);
		sec_mem_elem = osh->sec_list_4096;
		osh->sec_list_4096 = sec_mem_elem->next;
#endif /* NOT_YET */

		sec_mem_elem->next = NULL;

	if (ptr_cma_info->sec_alloc_list_tail) {
		ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
	}
	else {
		/* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */
		ASSERT(ptr_cma_info->sec_alloc_list == NULL);
		ptr_cma_info->sec_alloc_list = sec_mem_elem;
		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
	}
	return sec_mem_elem;
}

static void BCMFASTPATH
osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
{
	sec_mem_elem->dma_handle = 0x0;
	sec_mem_elem->va = NULL;
#ifdef NOT_YET
	if (sec_mem_elem->size == 512) {
		sec_mem_elem->next = osh->sec_list_512;
		osh->sec_list_512 = sec_mem_elem;
	} else if (sec_mem_elem->size == 2048) {
		sec_mem_elem->next = osh->sec_list_2048;
		osh->sec_list_2048 = sec_mem_elem;
	} else if (sec_mem_elem->size == 4096) {
#endif /* NOT_YET */
		sec_mem_elem->next = osh->sec_list_4096;
		osh->sec_list_4096 = sec_mem_elem;
#ifdef NOT_YET
	}
	else
		printf("%s free failed size=%d\n", __FUNCTION__, sec_mem_elem->size);
#endif /* NOT_YET */
}

static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
{
	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
	sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;

	if (sec_mem_elem->dma_handle == dma_handle) {

		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

		if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
			ptr_cma_info->sec_alloc_list_tail = NULL;
			ASSERT(ptr_cma_info->sec_alloc_list == NULL);
		}

		return sec_mem_elem;
	}
	sec_mem_elem = sec_mem_elem->next;

	while (sec_mem_elem != NULL) {

		if (sec_mem_elem->dma_handle == dma_handle) {

			sec_prv_elem->next = sec_mem_elem->next;
			if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
				ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;

			return sec_mem_elem;
		}
		sec_prv_elem = sec_mem_elem;
		sec_mem_elem = sec_mem_elem->next;
	}
	return NULL;
}

static sec_mem_elem_t *
osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;

	if (sec_mem_elem) {

		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

		if (ptr_cma_info->sec_alloc_list == NULL)
			ptr_cma_info->sec_alloc_list_tail = NULL;

		return sec_mem_elem;

	} else
		return NULL;
}

static void * BCMFASTPATH
osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
	return ptr_cma_info->sec_alloc_list_tail;
}

dma_addr_t BCMFASTPATH
osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	uint loffset;
	void *vaorig = ((uint8 *)va + size);
	dma_addr_t dma_handle = 0x0;
	/* packet will be the one added with osl_sec_dma_map() just before this call */

	sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);

	if (sec_mem_elem && sec_mem_elem->va == vaorig) {

		pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
		loffset = sec_mem_elem->pa_cma - (sec_mem_elem->pa_cma & ~(PAGE_SIZE - 1));

		dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
			(direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));

	} else {
		printf("%s: error orig va not found va = 0x%p\n",
			__FUNCTION__, vaorig);
	}
	return dma_handle;
}

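/* Map a buffer through the secure-DMA bounce scheme: take a free CMA
 * element, and for TX copy the payload into its bounce buffer before
 * mapping; for RX zero the leading length word, which the unmap path later
 * polls to detect that the device has filled the buffer. Returns the bus
 * address of the bounce page.
 */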
| dma_addr_t BCMFASTPATH
 | |
| osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
 | |
| 	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
 | |
| {
 | |
| 
 | |
| 	sec_mem_elem_t *sec_mem_elem;
 | |
| 	struct page *pa_cma_page;
 | |
| 	void *pa_cma_kmap_va = NULL;
 | |
| 	uint buflen = 0;
 | |
| 	dma_addr_t dma_handle = 0x0;
 | |
| 	uint loffset;
 | |
| #ifdef NOT_YET
 | |
| 	int *fragva;
 | |
| 	struct sk_buff *skb;
 | |
| 	int i = 0;
 | |
| #endif /* NOT_YET */
 | |
| 
 | |
| 	ASSERT((direction == DMA_RX) || (direction == DMA_TX));
 | |
| 	sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);
 | |
| 
 | |
| 	sec_mem_elem->va = va;
 | |
| 	sec_mem_elem->direction = direction;
 | |
| 	pa_cma_page = sec_mem_elem->pa_cma_page;
 | |
| 
 | |
| 	loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
 | |
| 	/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
 | |
| 	* pa_cma_kmap_va += loffset;
 | |
| 	*/
 | |
| 
 | |
| 	pa_cma_kmap_va = sec_mem_elem->vac;
 | |
| 	pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);
 | |
| 	buflen = size;
 | |
| 
 | |
| 	if (direction == DMA_TX) {
 | |
| 		memcpy((uint8*)pa_cma_kmap_va+offset, va, size);
 | |
| 
 | |
| #ifdef NOT_YET
 | |
| 		if (p == NULL) {
 | |
| 
 | |
| 			memcpy(pa_cma_kmap_va, va, size);
 | |
| 			/* prhex("Txpkt",pa_cma_kmap_va, size); */
 | |
| 		} else {
 | |
| 			for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
 | |
| 				if (skb_is_nonlinear(skb)) {
 | |
| 
 | |
| 					for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 | |
| 						skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 | |
| 						fragva = kmap_atomic(skb_frag_page(f));
 | |
| 						pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
 | |
| 						memcpy((pa_cma_kmap_va),
 | |
| 						(fragva + f->page_offset), skb_frag_size(f));
 | |
| 						kunmap_atomic(fragva);
 | |
| 						buflen += skb_frag_size(f);
 | |
| 					}
 | |
| 				} else {
 | |
| 
 | |
| 					pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
 | |
| 					memcpy(pa_cma_kmap_va, skb->data, skb->len);
 | |
| 					buflen += skb->len;
 | |
| 				}
 | |
| 			}
 | |
| 
 | |
| 		}
 | |
| #endif /* NOT_YET */
 | |
| 		if (dmah) {
 | |
| 			dmah->nsegs = 1;
 | |
| 			dmah->origsize = buflen;
 | |
| 		}
 | |
| 	}
 | |
| 	else
 | |
| 	{
 | |
| 		if ((p != NULL) && (dmah != NULL)) {
 | |
| 			dmah->nsegs = 1;
 | |
| 			dmah->origsize = buflen;
 | |
| 		}
 | |
| 		*(uint32 *)(pa_cma_kmap_va) = 0x0;
 | |
| 	}
 | |
| 
 | |
| 	if (direction == DMA_RX) {
 | |
| 		flush_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
 | |
| 	}
 | |
| 		dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset+offset, buflen,
 | |
| 			(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
 | |
| 	if (dmah) {
 | |
| 		dmah->segs[0].addr = dma_handle;
 | |
| 		dmah->segs[0].length = buflen;
 | |
| 	}
 | |
| 	sec_mem_elem->dma_handle = dma_handle;
 | |
| 	/* kunmap_atomic(pa_cma_kmap_va-loffset); */
 | |
| 	return dma_handle;
 | |
| }
 | |
| 
 | |
| dma_addr_t BCMFASTPATH
 | |
| osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
 | |
| {
 | |
| 
 | |
| 	struct page *pa_cma_page;
 | |
| 	phys_addr_t pa_cma;
 | |
| 	dma_addr_t dma_handle = 0x0;
 | |
| 	uint loffset;
 | |
| 
 | |
| 	pa_cma = ((uint8 *)va - (uint8 *)osh->contig_delta_va_pa);
 | |
| 	pa_cma_page = phys_to_page(pa_cma);
 | |
| 	loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1));
 | |
| 
 | |
| 	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
 | |
| 		(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
 | |
| 
 | |
| 	return dma_handle;
 | |
| }
void BCMFASTPATH
osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
	void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem;
#ifdef NOT_YET
	struct page *pa_cma_page;
#endif // endif
	void *pa_cma_kmap_va = NULL;
	uint buflen = 0;
	dma_addr_t pa_cma;
	void *va;
	int read_count = 0;
	BCM_REFERENCE(buflen);
	BCM_REFERENCE(read_count);

	sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
	ASSERT(sec_mem_elem);

	va = sec_mem_elem->va;
	va = (uint8 *)va - offset;
	pa_cma = sec_mem_elem->pa_cma;

#ifdef NOT_YET
	pa_cma_page = sec_mem_elem->pa_cma_page;
#endif // endif

	if (direction == DMA_RX) {

		if (p == NULL) {

			/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
			 * pa_cma_kmap_va += loffset;
			 */

			pa_cma_kmap_va = sec_mem_elem->vac;

			/* Wait (up to ~200 us) for the length word to be filled
			 * in before copying the payload out of the bounce buffer.
			 */
			do {
				invalidate_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));

				buflen = *(uint *)(pa_cma_kmap_va);
				if (buflen)
					break;

				OSL_DELAY(1);
				read_count++;
			} while (read_count < 200);
			dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
			memcpy(va, pa_cma_kmap_va, size);
			/* kunmap_atomic(pa_cma_kmap_va); */
		}
#ifdef NOT_YET
		else {
			buflen = 0;
			for (skb = (struct sk_buff *)p; (buflen < size) &&
				(skb != NULL); skb = skb->next) {
				if (skb_is_nonlinear(skb)) {
					pa_cma_kmap_va = kmap_atomic(pa_cma_page);
					for (i = 0; (buflen < size) &&
						(i < skb_shinfo(skb)->nr_frags); i++) {
						skb_frag_t *f = &skb_shinfo(skb)->frags[i];
						cpuaddr = kmap_atomic(skb_frag_page(f));
						pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
						memcpy((cpuaddr + f->page_offset),
							pa_cma_kmap_va, skb_frag_size(f));
						kunmap_atomic(cpuaddr);
						buflen += skb_frag_size(f);
					}
					kunmap_atomic(pa_cma_kmap_va);
				} else {
					pa_cma_kmap_va = kmap_atomic(pa_cma_page);
					pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
					memcpy(skb->data, pa_cma_kmap_va, skb->len);
					kunmap_atomic(pa_cma_kmap_va);
					buflen += skb->len;
				}
			}
		}
#endif /* NOT_YET */
	} else {
		dma_unmap_page(OSH_NULL, pa_cma, size + offset, DMA_TO_DEVICE);
	}

	osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
}
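
/*
 * Drain every outstanding mapping tracked in ptr_cma_info, e.g. when the
 * owning ring or interface is being torn down.
 */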
void
osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem;

	sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);

	while (sec_mem_elem != NULL) {

		dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size,
			sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		osl_sec_dma_free_mem_elem(osh, sec_mem_elem);

		sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
	}
}
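
/*
 * Pre-split the reserved coherent region into SEC_CMA_COHERENT_MAX blocks
 * of SEC_CMA_COHERENT_BLK bytes each; the alloc/free routines below simply
 * toggle the per-block avail flag.
 */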
static void
osl_sec_dma_init_consistent(osl_t *osh)
{
	int i;
	void *temp_va = osh->contig_base_alloc_coherent_va;
	phys_addr_t temp_pa = osh->contig_base_alloc_coherent;

	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		osh->sec_cma_coherent[i].avail = TRUE;
		osh->sec_cma_coherent[i].va = temp_va;
		osh->sec_cma_coherent[i].pa = temp_pa;
		temp_va = ((uint8 *)temp_va) + SEC_CMA_COHERENT_BLK;
		temp_pa += SEC_CMA_COHERENT_BLK;
	}
}
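
/*
 * First-fit allocation from the fixed-block pool. Requests larger than one
 * block are rejected outright, and align_bits is unused here; the pool's
 * fixed block layout is assumed to satisfy callers' alignment needs.
 */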
static void *
osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
{
	void *temp_va = NULL;
	ulong temp_pa = 0;
	int i;

	if (size > SEC_CMA_COHERENT_BLK) {
		printf("%s unsupported size\n", __FUNCTION__);
		return NULL;
	}

	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		if (osh->sec_cma_coherent[i].avail == TRUE) {
			temp_va = osh->sec_cma_coherent[i].va;
			temp_pa = osh->sec_cma_coherent[i].pa;
			osh->sec_cma_coherent[i].avail = FALSE;
			break;
		}
	}

	if (i == SEC_CMA_COHERENT_MAX)
		printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
			temp_va, (ulong)temp_pa, size);

	*pap = (unsigned long)temp_pa;
	return temp_va;
}
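
/*
 * Return a block to the pool by matching its VA; pa and size are only used
 * for the error print. Illustrative pairing with the allocator above (a
 * sketch only, not taken from this driver; len is a placeholder):
 *
 *	ulong pa;
 *	void *va = osl_sec_dma_alloc_consistent(osh, len, 0, &pa);
 *	...
 *	osl_sec_dma_free_consistent(osh, va, len, pa);
 */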
static void
osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
	int i = 0;

	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		if (osh->sec_cma_coherent[i].va == va) {
			osh->sec_cma_coherent[i].avail = TRUE;
			break;
		}
	}
	if (i == SEC_CMA_COHERENT_MAX)
		printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
			va, (ulong)pa, size);
}
#endif /* BCM_SECURE_DMA */

/* Timer APIs */
/* Note: none of these timer APIs are thread-safe; callers must protect them
 * with their own locks.
 */
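
/*
 * Illustrative call sequence (a sketch only, not taken from this driver;
 * my_cb and my_ctx are placeholder names). Serialization is the caller's
 * job, e.g. via the osl_spin_lock() wrappers defined later in this file:
 *
 *	osl_timer_t *t = osl_timer_init(osh, "tmo", my_cb, my_ctx);
 *	osl_timer_update(osh, t, 100, FALSE);	arms the timer for ~100 ms
 *	...
 *	osl_timer_del(osh, t);			stops the timer and frees t
 */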

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
/* From kernel 4.15 on, timer callbacks receive the struct timer_list pointer
 * itself; this shim recovers the legacy (callback, arg) pair stored in
 * timer_list_compat_t and invokes it.
 */
void
timer_cb_compat(struct timer_list *tl)
{
	timer_list_compat_t *t = container_of(tl, timer_list_compat_t, timer);
	t->callback((ulong)t->arg);
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */
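
/*
 * Note: osl_timer_init() below leaves t->set at TRUE even though the timer
 * has not been armed, so on ASSERT-enabled builds osl_timer_add() (which
 * asserts !t->set) will trip for a freshly initialized timer;
 * osl_timer_update() performs no such check.
 */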
osl_timer_t *
osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
{
	osl_timer_t *t;
	BCM_REFERENCE(fn);
	if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
		printk(KERN_ERR "osl_timer_init: out of memory, malloced %d bytes\n",
			(int)sizeof(osl_timer_t));
		return (NULL);
	}
	if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) {
		printf("osl_timer_init: malloc failed\n");
		MFREE(NULL, t, sizeof(osl_timer_t));
		return (NULL);
	}
	t->set = TRUE;

	init_timer_compat(t->timer, (linux_timer_fn)fn, arg);

	return (t);
}

void
osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
{
	if (t == NULL) {
		printf("%s: Timer handle is NULL\n", __FUNCTION__);
		return;
	}
	ASSERT(!t->set);

	t->set = TRUE;
	if (periodic) {
		printf("Periodic timers are not supported by Linux timer apis\n");
	}
	timer_expires(t->timer) = jiffies + ms * HZ / 1000;

	add_timer(t->timer);

	return;
}

void
osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
{
	if (t == NULL) {
		printf("%s: Timer handle is NULL\n", __FUNCTION__);
		return;
	}
	if (periodic) {
		printf("Periodic timers are not supported by Linux timer apis\n");
	}
	t->set = TRUE;
	timer_expires(t->timer) = jiffies + ms * HZ / 1000;

	mod_timer(t->timer, timer_expires(t->timer));

	return;
}

/*
 * Stop and free the timer. Returns FALSE only when the timer handle is
 * NULL; otherwise the timer is deleted (even if it was still pending) and
 * TRUE is returned.
 */
bool
osl_timer_del(osl_t *osh, osl_timer_t *t)
{
	if (t == NULL) {
		printf("%s: Timer handle is NULL\n", __FUNCTION__);
		return (FALSE);
	}
	if (t->set) {
		t->set = FALSE;
		if (t->timer) {
			del_timer(t->timer);
			MFREE(NULL, t->timer, sizeof(struct timer_list));
		}
		MFREE(NULL, t, sizeof(osl_timer_t));
	}
	return (TRUE);
}
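
/*
 * Kernel 4.14 changed kernel_read() to take the buffer and length before a
 * pointer to the file position; this wrapper preserves the older
 * (file, offset, addr, count) argument order for existing callers.
 */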
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
int
kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count)
{
	return (int)kernel_read(file, addr, (size_t)count, &offset);
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */

void *
osl_spin_lock_init(osl_t *osh)
{
	/* Adding 4 bytes since sizeof(spinlock_t) can be 0 when CONFIG_SMP and
	 * CONFIG_DEBUG_SPINLOCK are both undefined, which triggers kernel
	 * asserts in internal builds.
	 */
	spinlock_t *lock = MALLOC(osh, sizeof(spinlock_t) + 4);
	if (lock)
		spin_lock_init(lock);
	return ((void *)lock);
}

void
osl_spin_lock_deinit(osl_t *osh, void *lock)
{
	if (lock)
		MFREE(osh, lock, sizeof(spinlock_t) + 4);
}

unsigned long
osl_spin_lock(void *lock)
{
	unsigned long flags = 0;

	if (lock)
		spin_lock_irqsave((spinlock_t *)lock, flags);

	return flags;
}

void
osl_spin_unlock(void *lock, unsigned long flags)
{
	if (lock)
		spin_unlock_irqrestore((spinlock_t *)lock, flags);
}
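
/*
 * Illustrative lifecycle for these opaque lock wrappers (a sketch only, not
 * taken from this driver):
 *
 *	void *lk = osl_spin_lock_init(osh);
 *	unsigned long flags = osl_spin_lock(lk);
 *	... critical section ...
 *	osl_spin_unlock(lk, flags);
 *	osl_spin_lock_deinit(osh, lk);
 */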

#ifdef USE_DMA_LOCK
static void
osl_dma_lock(osl_t *osh)
{
	/* In hard-IRQ context (or with IRQs already disabled) spin_lock_bh()
	 * must not be used, so fall back to a plain spin_lock() there and
	 * remember which variant was taken for the matching unlock.
	 */
	if (likely(in_irq() || irqs_disabled())) {
		spin_lock(&osh->dma_lock);
	} else {
		spin_lock_bh(&osh->dma_lock);
		osh->dma_lock_bh = TRUE;
	}
}

static void
osl_dma_unlock(osl_t *osh)
{
	if (unlikely(osh->dma_lock_bh)) {
		osh->dma_lock_bh = FALSE;
		spin_unlock_bh(&osh->dma_lock);
	} else {
		spin_unlock(&osh->dma_lock);
	}
}

static void
osl_dma_lock_init(osl_t *osh)
{
	spin_lock_init(&osh->dma_lock);
	osh->dma_lock_bh = FALSE;
}
#endif /* USE_DMA_LOCK */
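
/*
 * do_gettimeofday() and getnstimeofday() were removed during the 5.x kernel
 * series in favor of the y2038-safe ktime_get_real_ts64(), hence the version
 * ladder below; each source is normalized into struct osl_timespec.
 */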
void
osl_do_gettimeofday(struct osl_timespec *ts)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
	struct timespec64 curtime;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
	struct timespec curtime;
#else
	struct timeval curtime;
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
	ktime_get_real_ts64(&curtime);
	ts->tv_nsec = curtime.tv_nsec;
	ts->tv_usec = curtime.tv_nsec / 1000;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
	getnstimeofday(&curtime);
	ts->tv_nsec = curtime.tv_nsec;
	ts->tv_usec = curtime.tv_nsec / 1000;
#else
	do_gettimeofday(&curtime);
	ts->tv_usec = curtime.tv_usec;
	ts->tv_nsec = curtime.tv_usec * 1000;
#endif
	ts->tv_sec = curtime.tv_sec;
}
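
/*
 * Microsecond difference cur_ts - old_ts, computed with an explicit borrow
 * on the sub-second part. For example, cur = 5 s / 100 us and
 * old = 3 s / 400 us gives 2 * 1000000 - 300 = 1999700 us.
 */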
uint32
osl_do_gettimediff(struct osl_timespec *cur_ts, struct osl_timespec *old_ts)
{
	uint32 diff_s, diff_us, total_diff_us;
	bool pgc_g = FALSE;

	diff_s = (uint32)cur_ts->tv_sec - (uint32)old_ts->tv_sec;
	pgc_g = (cur_ts->tv_usec > old_ts->tv_usec) ? TRUE : FALSE;
	diff_us = pgc_g ? (cur_ts->tv_usec - old_ts->tv_usec) : (old_ts->tv_usec - cur_ts->tv_usec);
	total_diff_us = pgc_g ? (diff_s * 1000000 + diff_us) : (diff_s * 1000000 - diff_us);
	return total_diff_us;
}

void
osl_get_monotonic_boottime(struct osl_timespec *ts)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
	struct timespec64 curtime;
#else
	struct timespec curtime;
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
	curtime = ktime_to_timespec64(ktime_get_boottime());
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
	curtime = ktime_to_timespec(ktime_get_boottime());
#else
	get_monotonic_boottime(&curtime);
#endif
	ts->tv_sec = curtime.tv_sec;
	ts->tv_nsec = curtime.tv_nsec;
	ts->tv_usec = curtime.tv_nsec / 1000;
}
