move components to SDK dir

This commit is contained in:
Shuanglei Tao
2025-03-03 09:06:26 +08:00
parent 20d1297e57
commit f4f4c9e60d
1021 changed files with 58 additions and 35059 deletions

View File

@@ -0,0 +1,211 @@
/**
* Copyright (c) 2016 - 2017, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "nrf_block_dev_empty.h"
/**@file
*
* @ingroup nrf_block_dev
* @{
*
* @brief This module implements the block device API. It behaves like:
* - /dev/empty for write operations
* - /dev/zero for read operations
*/
/**
 * @brief Initializes the EMPTY block device instance.
 *
 * Copies the geometry from the static configuration into the work area
 * and, when an event handler is supplied, simulates asynchronous
 * completion by firing @ref NRF_BLOCK_DEV_EVT_INIT immediately.
 */
static ret_code_t block_dev_empty_init(nrf_block_dev_t const * p_blk_dev,
                                       nrf_block_dev_ev_handler ev_handler,
                                       void const * p_context)
{
    ASSERT(p_blk_dev);

    nrf_block_dev_empty_t const * p_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_empty_t, block_dev);
    nrf_block_dev_empty_work_t * p_wrk = p_dev->p_work;

    /* Geometry comes straight from the static configuration. */
    p_wrk->geometry.blk_size  = p_dev->empty_config.block_size;
    p_wrk->geometry.blk_count = p_dev->empty_config.block_count;
    p_wrk->p_context  = p_context;
    p_wrk->ev_handler = ev_handler;

    if (p_wrk->ev_handler != NULL)
    {
        /* Simulated asynchronous completion. */
        const nrf_block_dev_event_t evt = {
            NRF_BLOCK_DEV_EVT_INIT,
            NRF_BLOCK_DEV_RESULT_SUCCESS,
            NULL,
            p_wrk->p_context
        };
        p_wrk->ev_handler(p_blk_dev, &evt);
    }

    return NRF_SUCCESS;
}
/**
 * @brief Un-initializes the EMPTY block device instance.
 *
 * Fires @ref NRF_BLOCK_DEV_EVT_UNINIT (if a handler is registered) and
 * then zeroes the work area.
 */
static ret_code_t block_dev_empty_uninit(nrf_block_dev_t const * p_blk_dev)
{
    ASSERT(p_blk_dev);

    nrf_block_dev_empty_t const * p_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_empty_t, block_dev);
    nrf_block_dev_empty_work_t * p_wrk = p_dev->p_work;

    if (p_wrk->ev_handler != NULL)
    {
        /* Simulated asynchronous completion. */
        const nrf_block_dev_event_t evt = {
            NRF_BLOCK_DEV_EVT_UNINIT,
            NRF_BLOCK_DEV_RESULT_SUCCESS,
            NULL,
            p_wrk->p_context
        };
        p_wrk->ev_handler(p_blk_dev, &evt);
    }

    /* Forget geometry, handler and context. */
    memset(p_wrk, 0, sizeof(nrf_block_dev_empty_work_t));
    return NRF_SUCCESS;
}
/**
 * @brief Handles a read request: /dev/zero semantics.
 *
 * Fills the whole request buffer with zeroes and reports completion.
 */
static ret_code_t block_dev_empty_read_req(nrf_block_dev_t const * p_blk_dev,
                                           nrf_block_req_t const * p_blk)
{
    ASSERT(p_blk_dev);
    ASSERT(p_blk);

    nrf_block_dev_empty_t const * p_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_empty_t, block_dev);
    nrf_block_dev_empty_work_t * p_wrk = p_dev->p_work;

    /* Every "read" block is all zeroes. */
    memset(p_blk->p_buff, 0, p_wrk->geometry.blk_size * p_blk->blk_count);

    if (p_wrk->ev_handler != NULL)
    {
        /* Simulated asynchronous completion. */
        const nrf_block_dev_event_t evt = {
            NRF_BLOCK_DEV_EVT_BLK_READ_DONE,
            NRF_BLOCK_DEV_RESULT_SUCCESS,
            p_blk,
            p_wrk->p_context
        };
        p_wrk->ev_handler(p_blk_dev, &evt);
    }

    return NRF_SUCCESS;
}
/**
 * @brief Handles a write request: /dev/empty semantics.
 *
 * The data is discarded; only the completion event is generated.
 */
static ret_code_t block_dev_empty_write_req(nrf_block_dev_t const * p_blk_dev,
                                            nrf_block_req_t const * p_blk)
{
    ASSERT(p_blk_dev);
    ASSERT(p_blk);

    nrf_block_dev_empty_t const * p_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_empty_t, block_dev);
    nrf_block_dev_empty_work_t * p_wrk = p_dev->p_work;

    if (p_wrk->ev_handler != NULL)
    {
        /* Simulated asynchronous completion. */
        const nrf_block_dev_event_t evt = {
            NRF_BLOCK_DEV_EVT_BLK_WRITE_DONE,
            NRF_BLOCK_DEV_RESULT_SUCCESS,
            p_blk,
            p_wrk->p_context
        };
        p_wrk->ev_handler(p_blk_dev, &evt);
    }

    return NRF_SUCCESS;
}
/**
 * @brief IOCTL handler for the EMPTY block device.
 *
 * Supported requests:
 * - CACHE_FLUSH: there is no cache, so the optional bool output is set
 *   to false (never flushing).
 * - INFO_STRINGS: returns a pointer to the instance's info strings.
 *
 * @param[in] p_blk_dev Block device handle
 * @param[in] req       IOCTL request code
 * @param[in] p_data    Request-specific in/out data
 *
 * @return NRF_SUCCESS, NRF_ERROR_INVALID_PARAM, or NRF_ERROR_NOT_SUPPORTED
 */
static ret_code_t block_dev_empty_ioctl(nrf_block_dev_t const * p_blk_dev,
                                        nrf_block_dev_ioctl_req_t req, void * p_data)
{
    /* Validate the handle like every sibling operation does; CONTAINER_OF
     * on a NULL pointer would produce a bogus instance pointer. */
    ASSERT(p_blk_dev);
    nrf_block_dev_empty_t const * p_empty_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_empty_t, block_dev);
    switch (req)
    {
        case NRF_BLOCK_DEV_IOCTL_REQ_CACHE_FLUSH:
        {
            bool * p_flushing = p_data;
            if (p_flushing)
            {
                /* No cache: nothing is ever pending. */
                *p_flushing = false;
            }
            return NRF_SUCCESS;
        }
        case NRF_BLOCK_DEV_IOCTL_REQ_INFO_STRINGS:
        {
            if (p_data == NULL)
            {
                return NRF_ERROR_INVALID_PARAM;
            }
            nrf_block_dev_info_strings_t const * * pp_strings = p_data;
            *pp_strings = &p_empty_dev->info_strings;
            return NRF_SUCCESS;
        }
        default:
            break;
    }
    return NRF_ERROR_NOT_SUPPORTED;
}
/**
 * @brief Returns the geometry stored in the instance's work area.
 */
static nrf_block_dev_geometry_t const * block_dev_empty_geometry(nrf_block_dev_t const * p_blk_dev)
{
    ASSERT(p_blk_dev);
    nrf_block_dev_empty_t const * p_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_empty_t, block_dev);
    return &p_dev->p_work->geometry;
}
/* Operation table exposed to the generic block device API
 * (@ref nrf_block_dev_ops_t). */
const nrf_block_dev_ops_t nrf_block_device_empty_ops = {
    .init = block_dev_empty_init,
    .uninit = block_dev_empty_uninit,
    .read_req = block_dev_empty_read_req,
    .write_req = block_dev_empty_write_req,
    .ioctl = block_dev_empty_ioctl,
    .geometry = block_dev_empty_geometry,
};

View File

@@ -0,0 +1,141 @@
/**
* Copyright (c) 2016 - 2017, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef NRF_BLOCK_DEV_EMPTY_H__
#define NRF_BLOCK_DEV_EMPTY_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "nrf_block_dev.h"
/**@file
*
* @defgroup nrf_block_dev_empty Empty implementation
* @ingroup nrf_block_dev
*
* This module implements the block device API. It works like:
* - /dev/empty for write operations
* - /dev/zero for read operations
* @{
*
*/
/**
 * @brief EMPTY block device operation table.
 *
 * Implements the generic block device interface on a device that
 * discards writes and reads back zeroes.
 * */
extern const nrf_block_dev_ops_t nrf_block_device_empty_ops;

/**
 * @brief Work structure of EMPTY block device.
 *
 * Runtime state, filled in by the init operation.
 */
typedef struct {
    nrf_block_dev_geometry_t geometry;   //!< Block device geometry
    nrf_block_dev_ev_handler ev_handler; //!< Block device event handler (NULL selects synchronous mode)
    void const *             p_context;  //!< Context handle passed to event handler
} nrf_block_dev_empty_work_t;

/**
 * @brief EMPTY block device config initializer (@ref nrf_block_dev_empty_config_t)
 *
 * @param blk_size  Block size in bytes
 * @param blk_count Block count
 * */
#define NRF_BLOCK_DEV_EMPTY_CONFIG(blk_size, blk_count)  {  \
        .block_size = (blk_size),                           \
        .block_count = (blk_count)                          \
}

/**
 * @brief EMPTY block device config
 */
typedef struct {
    uint32_t block_size;  //!< Desired block size
    uint32_t block_count; //!< Desired block count
} nrf_block_dev_empty_config_t;

/**
 * @brief EMPTY block device instance
 * */
typedef struct {
    nrf_block_dev_t              block_dev;    //!< Generic block device (carries the ops pointer)
    nrf_block_dev_info_strings_t info_strings; //!< Block device information strings
    nrf_block_dev_empty_config_t empty_config; //!< EMPTY block device config
    nrf_block_dev_empty_work_t * p_work;       //!< EMPTY block device work structure
} nrf_block_dev_empty_t;
/**
 * @brief Defines an EMPTY block device.
 *
 * Creates the static work area and a const instance wired to
 * @ref nrf_block_device_empty_ops.
 *
 * @param name   Instance name
 * @param config Configuration @ref nrf_block_dev_empty_config_t
 * @param info   Info strings @ref NFR_BLOCK_DEV_INFO_CONFIG
 * */
#define NRF_BLOCK_DEV_EMPTY_DEFINE(name, config, info)          \
    static nrf_block_dev_empty_work_t CONCAT_2(name, _work);    \
    static const nrf_block_dev_empty_t name = {                 \
        .block_dev = { .p_ops = &nrf_block_device_empty_ops },  \
        .info_strings = BRACKET_EXTRACT(info),                  \
        .empty_config = config,                                 \
        .p_work = &CONCAT_2(name, _work),                       \
    }
/**
 * @brief Returns block device API handle from EMPTY block device.
 *
 * NOTE(review): despite the "_ops_get" suffix this returns the
 * @ref nrf_block_dev_t handle (which carries the ops pointer), not the
 * ops table itself; the name is kept for API compatibility.
 *
 * @param[in] p_blk_empty EMPTY block device
 * @return Block device handle
 */
static inline nrf_block_dev_t const *
nrf_block_dev_empty_ops_get(nrf_block_dev_empty_t const * p_blk_empty)
{
    return &p_blk_empty->block_dev;
}
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* NRF_BLOCK_DEV_EMPTY_H__ */

View File

@@ -0,0 +1,353 @@
/**
* Copyright (c) 2016 - 2017, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef NRF_BLOCK_DEV_H__
#define NRF_BLOCK_DEV_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "sdk_common.h"
#include "nrf_assert.h"
#include <stddef.h>
/**@file
*
* @defgroup nrf_block_dev Block device
* @{
* @ingroup app_common
*
* @brief This module implements a unified block device API. It can be used as a middle layer
* between filesystems and memories.
*/
/**
 * @brief Block device request descriptor item.
 *
 * Describes one contiguous read or write transfer.
 */
typedef struct {
    uint32_t blk_id;    //!< First block number of the transfer
    uint32_t blk_count; //!< Number of blocks to transfer
    void *   p_buff;    //!< Data buffer
} nrf_block_req_t;
/**
 * @brief Helper macro to create block device read/write request item
 *
 * Arguments are parenthesized in the expansion so that compound
 * expressions (e.g. conditional expressions) expand safely.
 *
 * @param name        Request variable name
 * @param block_start Block number start
 * @param block_count Number of blocks
 * @param buff        Buffer to read/write
 */
#define NRF_BLOCK_DEV_REQUEST(name, block_start, block_count, buff)  \
    nrf_block_req_t name = {                                         \
        .blk_id = (block_start),                                     \
        .blk_count = (block_count),                                  \
        .p_buff = (buff),                                            \
    }
/**
 * @brief Block device events.
 *
 * Events are propagated only when an event handler is registered
 * (@ref nrf_blk_dev_init); with a NULL handler the API works in
 * synchronous mode and no events are generated.
 */
typedef enum {
    NRF_BLOCK_DEV_EVT_INIT,           /**< Passed to event handler when init is done*/
    NRF_BLOCK_DEV_EVT_UNINIT,         /**< Passed to event handler when uninit is done*/
    NRF_BLOCK_DEV_EVT_BLK_READ_DONE,  /**< Passed to event handler when a block read operation is done*/
    NRF_BLOCK_DEV_EVT_BLK_WRITE_DONE, /**< Passed to event handler when a block write operation is done*/
} nrf_block_dev_event_type_t;

/**
 * @brief Result of a block device operation, carried in the event.
 */
typedef enum {
    NRF_BLOCK_DEV_RESULT_SUCCESS = 0, /**< Operation completed successfully*/
    NRF_BLOCK_DEV_RESULT_IO_ERROR,    /**< I/O error*/
    NRF_BLOCK_DEV_RESULT_TIMEOUT,     /**< Device timeout*/
} nrf_block_dev_result_t;
/**
 * @brief Block device event
 * */
typedef struct {
    nrf_block_dev_event_type_t ev_type;   //!< Event type
    nrf_block_dev_result_t     result;    //!< Operation status
    nrf_block_req_t const *    p_blk_req; //!< Block request the event refers to (NULL for init/uninit events)
    void const *               p_context; //!< Context supplied at init
} nrf_block_dev_event_t;

struct nrf_block_dev_s;

/**
 * @brief Block device event handler.
 *
 * @param[in] p_blk_dev Block device handle
 * @param[in] p_event   Block device event
 */
typedef void (* nrf_block_dev_ev_handler)(struct nrf_block_dev_s const * p_blk_dev,
                                          nrf_block_dev_event_t const * p_event);
/**
 * @brief Block device geometry
 */
typedef struct {
    uint32_t blk_count; //!< Block count
    uint32_t blk_size;  //!< Block size in bytes
} nrf_block_dev_geometry_t;

/**
 * @brief Block device information strings
 */
typedef struct {
    const char * p_vendor;   //!< Vendor string
    const char * p_product;  //!< Product string
    const char * p_revision; //!< Revision string
} nrf_block_dev_info_strings_t;
/**
 * @brief Block device information config
 *
 * NOTE(review): the 'NFR' prefix (instead of 'NRF') looks like a typo,
 * but it is the public macro name — renaming would break existing callers.
 *
 * @param vendor   Vendor string
 * @param product  Product string
 * @param revision Revision string
 * */
#define NFR_BLOCK_DEV_INFO_CONFIG(vendor, product, revision) ( {    \
        .p_vendor = vendor,                                         \
        .p_product = product,                                       \
        .p_revision = revision,                                     \
})

/**
 * @brief Empty info string initializer
 * */
#define NFR_BLOCK_DEV_INFO_CONFIG_EMPTY \
        NFR_BLOCK_DEV_INFO_CONFIG(NULL, NULL, NULL)
/**
 * @brief Block device IOCTL requests
 */
typedef enum {
    NRF_BLOCK_DEV_IOCTL_REQ_CACHE_FLUSH = 0, /**< Cache flush IOCTL request; p_data is an optional bool* receiving the flush-pending status*/
    NRF_BLOCK_DEV_IOCTL_REQ_INFO_STRINGS,    /**< Get info strings IOCTL request; p_data is a nrf_block_dev_info_strings_t const** output*/
} nrf_block_dev_ioctl_req_t;
/**
 * @brief Helper macro to get block device address from specific instance
 *
 * The expansion is fully parenthesized so the result can be used directly
 * in a larger expression; without the outer parentheses a trailing member
 * access (e.g. `NRF_BLOCKDEV_BASE_ADDR(i, m)->p_ops`) would bind before
 * the address-of operator and take the address of the wrong object.
 *
 * @param instance Block device instance
 * @param member   Block device member name
 * */
#define NRF_BLOCKDEV_BASE_ADDR(instance, member) (&(instance).member)
/**
 * @brief Block device API
 *
 * The operation table every block device implementation provides.
 * Call the operations through the nrf_blk_dev_* inline wrappers below.
 * */
typedef struct nrf_block_dev_s {
    struct nrf_block_dev_ops_s {
        /**
         * @brief @ref nrf_blk_dev_init
         */
        ret_code_t (*init)(struct nrf_block_dev_s const * p_blk_dev,
                           nrf_block_dev_ev_handler ev_handler,
                           void const * p_context);

        /**
         * @brief @ref nrf_blk_dev_uninit
         */
        ret_code_t (*uninit)(struct nrf_block_dev_s const * p_blk_dev);

        /**
         * @brief @ref nrf_blk_dev_read_req
         */
        ret_code_t (*read_req)(struct nrf_block_dev_s const * p_blk_dev,
                               nrf_block_req_t const * p_blk);

        /**
         * @brief @ref nrf_blk_dev_write_req
         */
        ret_code_t (*write_req)(struct nrf_block_dev_s const * p_blk_dev,
                                nrf_block_req_t const * p_blk);

        /**
         * @brief @ref nrf_blk_dev_ioctl
         */
        ret_code_t (*ioctl)(struct nrf_block_dev_s const * p_blk_dev,
                            nrf_block_dev_ioctl_req_t req,
                            void * p_data);

        /**
         * @brief @ref nrf_blk_dev_geometry
         */
        nrf_block_dev_geometry_t const * (*geometry)(struct nrf_block_dev_s const * p_blk_dev);
    } const * p_ops; //!< Implementation-provided operation table
} nrf_block_dev_t;

/**
 * @brief Internals of @ref nrf_block_dev_t
 * */
typedef struct nrf_block_dev_ops_s nrf_block_dev_ops_t;
/**
 * @brief Initializes a block device.
 *
 * @param[in] p_blk_dev  Block device handle
 * @param[in] ev_handler Event handler (pass NULL to work in synchronous mode)
 * @param[in] p_context  Context passed to event handler
 *
 * @return Standard error code
 */
static inline ret_code_t nrf_blk_dev_init(nrf_block_dev_t const * p_blk_dev,
                                          nrf_block_dev_ev_handler ev_handler,
                                          void const * p_context)
{
    nrf_block_dev_ops_t const * p_ops = p_blk_dev->p_ops;
    ASSERT(p_ops->init);
    return p_ops->init(p_blk_dev, ev_handler, p_context);
}
/**
 * @brief Un-initializes a block device.
 *
 * @param[in] p_blk_dev Block device handle
 *
 * @return Standard error code
 */
static inline ret_code_t nrf_blk_dev_uninit(nrf_block_dev_t const * p_blk_dev)
{
    nrf_block_dev_ops_t const * p_ops = p_blk_dev->p_ops;
    ASSERT(p_ops->uninit);
    return p_ops->uninit(p_blk_dev);
}
/**
 * @brief Block read request.
 *
 * In synchronous mode this function will execute the read operation
 * and wait for its completion. In asynchronous mode the function will only request
 * the operation and return immediately. Then, the @ref NRF_BLOCK_DEV_EVT_BLK_READ_DONE
 * event will signal that operation has been completed and the specified buffer contains
 * valid data.
 *
 * @param[in] p_blk_dev Block device handle
 * @param[in] p_blk     Block device request
 *
 * @return Standard error code; NRF_ERROR_INVALID_PARAM when the request
 *         starts beyond, or runs past, the end of the device.
 */
static inline ret_code_t nrf_blk_dev_read_req(nrf_block_dev_t const * p_blk_dev,
                                              nrf_block_req_t const * p_blk)
{
    ASSERT(p_blk_dev->p_ops->read_req);
    ASSERT(p_blk_dev->p_ops->geometry);

    uint32_t dev_blk_count = p_blk_dev->p_ops->geometry(p_blk_dev)->blk_count;
    /* Validate the whole range, not just the first block; the subtraction
     * form avoids overflow of blk_id + blk_count. */
    if ((p_blk->blk_id >= dev_blk_count) ||
        (p_blk->blk_count > dev_blk_count - p_blk->blk_id))
    {
        return NRF_ERROR_INVALID_PARAM;
    }
    return p_blk_dev->p_ops->read_req(p_blk_dev, p_blk);
}
/**
 * @brief Block write request.
 *
 * In synchronous mode this function will execute the write operation
 * and wait for its completion. In asynchronous mode the function will only request
 * the operation and return immediately. Then, the @ref NRF_BLOCK_DEV_EVT_BLK_WRITE_DONE
 * event will signal that operation has been completed and the specified buffer
 * can be freed.
 *
 * @param[in] p_blk_dev Block device handle
 * @param[in] p_blk     Block device request
 *
 * @return Standard error code; NRF_ERROR_INVALID_PARAM when the request
 *         starts beyond, or runs past, the end of the device.
 */
static inline ret_code_t nrf_blk_dev_write_req(nrf_block_dev_t const * p_blk_dev,
                                               nrf_block_req_t const * p_blk)
{
    ASSERT(p_blk_dev->p_ops->write_req);
    ASSERT(p_blk_dev->p_ops->geometry);

    uint32_t dev_blk_count = p_blk_dev->p_ops->geometry(p_blk_dev)->blk_count;
    /* Validate the whole range, not just the first block; the subtraction
     * form avoids overflow of blk_id + blk_count. */
    if ((p_blk->blk_id >= dev_blk_count) ||
        (p_blk->blk_count > dev_blk_count - p_blk->blk_id))
    {
        return NRF_ERROR_INVALID_PARAM;
    }
    return p_blk_dev->p_ops->write_req(p_blk_dev, p_blk);
}
/**
 * @brief IO control function.
 *
 * @param[in] p_blk_dev Block device handle
 * @param[in] req       Block device ioctl request
 * @param[in] p_data    Block device ioctl data
 *
 * @return Standard error code
 * */
static inline ret_code_t nrf_blk_dev_ioctl(nrf_block_dev_t const * p_blk_dev,
                                           nrf_block_dev_ioctl_req_t req,
                                           void * p_data)
{
    nrf_block_dev_ops_t const * p_ops = p_blk_dev->p_ops;
    ASSERT(p_ops->ioctl);
    return p_ops->ioctl(p_blk_dev, req, p_data);
}
/**
 * @brief Return a geometry of a block device.
 *
 * @param[in] p_blk_dev Block device handle
 *
 * @return Block size and count @ref nrf_block_dev_geometry_t
 */
static inline nrf_block_dev_geometry_t const *
nrf_blk_dev_geometry(nrf_block_dev_t const * p_blk_dev)
{
    nrf_block_dev_ops_t const * p_ops = p_blk_dev->p_ops;
    ASSERT(p_ops->geometry);
    return p_ops->geometry(p_blk_dev);
}
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* NRF_BLOCK_DEV_H__ */

View File

@@ -0,0 +1,758 @@
/**
* Copyright (c) 2016 - 2017, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "nrf_serial_flash_params.h"
#include "nrf_block_dev_qspi.h"
/**@file
*
* @ingroup nrf_block_dev_qspi
* @{
*
* @brief This module implements block device API. It should be used as a reference block device.
*/
#define QSPI_STD_CMD_WRSR        0x01       /**< Write status register command*/
#define QSPI_STD_CMD_RSTEN       0x66       /**< Reset enable command*/
#define QSPI_STD_CMD_RST         0x99       /**< Reset command*/
#define QSPI_STD_CMD_READ_ID     0x9F       /**< Read ID command*/

#define BD_PAGE_PROGRAM_SIZE     256        /**< Page program size (minimum block size)*/
#define BD_ERASE_UNIT_INVALID_ID 0xFFFFFFFF /**< Invalid erase unit number*/
#define BD_ERASE_UNIT_ERASE_VAL  0xFFFFFFFF /**< Erased memory value*/

/**
 * @brief Block to erase unit translation
 *
 * The whole expansion is parenthesized so the macro composes safely with
 * adjacent operators; the original unparenthesized form mis-associated
 * when combined with * / % at the call site.
 *
 * @param blk_id   Block index
 * @param blk_size Block size
 * */
#define BD_BLOCK_TO_ERASEUNIT(blk_id, blk_size) \
    (((blk_id) * (blk_size)) / (NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE))

/**
 * @brief Blocks per erase unit
 *
 * @param blk_size Block size
 * */
#define BD_BLOCKS_PER_ERASEUNIT(blk_size) \
    (NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE / (blk_size))
static ret_code_t block_dev_qspi_eunit_write(nrf_block_dev_qspi_t const * p_qspi_dev,
                                             nrf_block_req_t * p_blk_left);

/**
 * @brief Patches a just-read request buffer with cached erase-unit data.
 *
 * In write-back mode the RAM erase-unit buffer may hold newer data than
 * the flash that was just read. If the read request overlaps the cached
 * erase unit, the overlapping portion of the request buffer is
 * overwritten with the cached data; otherwise the buffer is left as read.
 */
static void block_dev_qspi_read_from_eunit(nrf_block_dev_qspi_t const * p_qspi_dev)
{
    nrf_block_dev_qspi_work_t const * p_work = p_qspi_dev->p_work;

    /*In write-back mode data that we read might not be the same as in erase unit buffer*/
    uint32_t eunit_start = BD_BLOCK_TO_ERASEUNIT(p_work->req.blk_id,
                                                 p_work->geometry.blk_size);
    uint32_t eunit_end = BD_BLOCK_TO_ERASEUNIT(p_work->req.blk_id + p_work->req.blk_count,
                                               p_work->geometry.blk_size);

    if ((eunit_start > p_work->erase_unit_idx) || (eunit_end < p_work->erase_unit_idx))
    {
        /*Do nothing. Read request doesn't hit current cached erase unit*/
        return;
    }

    /*Case 1: Copy data from start erase unit*/
    if (eunit_start == p_work->erase_unit_idx)
    {
        /* blk: offset (in blocks) of the first requested block inside the cached unit */
        size_t blk = p_work->req.blk_id %
                     BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size);
        size_t cnt = BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size) - blk;
        size_t off = p_work->geometry.blk_size * blk;

        if (cnt > p_work->req.blk_count)
        {
            cnt = p_work->req.blk_count;
        }

        memcpy(p_work->req.p_buff,
               p_work->p_erase_unit_buff + off,
               cnt * p_work->geometry.blk_size);
        return;
    }

    /*Case 2: Copy data from end erase unit*/
    if (eunit_end == p_work->erase_unit_idx)
    {
        /* cnt: number of requested blocks that fall inside the cached unit */
        size_t cnt = (p_work->req.blk_id + p_work->req.blk_count) %
                     BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size);
        /* off: byte offset inside the request buffer where the cached unit begins */
        size_t off = (p_work->erase_unit_idx * BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size) -
                      p_work->req.blk_id) * p_work->geometry.blk_size;

        if (cnt > p_work->req.blk_count)
        {
            cnt = p_work->req.blk_count;
        }

        memcpy((uint8_t *)p_work->req.p_buff + off,
               p_work->p_erase_unit_buff,
               cnt * p_work->geometry.blk_size);
        return;
    }

    /*Case 3: Copy data from eunit_start < p_work->erase_unit_idx < eunit_end*/
    /* The cached unit lies strictly inside the request: copy the whole unit. */
    size_t off = (p_work->erase_unit_idx * BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size) -
                  p_work->req.blk_id) * p_work->geometry.blk_size;

    memcpy((uint8_t *)p_work->req.p_buff + off,
           p_work->p_erase_unit_buff,
           NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE);
}
/**
 * @brief Active QSPI block device handle. Only one instance.
 * */
static nrf_block_dev_qspi_t const * m_active_qspi_dev;

/**
 * @brief QSPI driver event handler — the block device state machine.
 *
 * Driven by QSPI transfer-done events. Depending on the current state it
 * either completes a read (patching it from the cached erase unit in
 * write-back mode) or advances the erase/program sequence of a write,
 * one dirty block at a time.
 *
 * @param[in] event     QSPI driver event (value not examined here; only its arrival matters)
 * @param[in] p_context The nrf_block_dev_qspi_t instance registered at init
 */
static void qspi_handler(nrf_drv_qspi_evt_t event, void * p_context)
{
    /* Ignore events that do not belong to the currently active instance. */
    if (m_active_qspi_dev != p_context)
    {
        return;
    }

    nrf_block_dev_qspi_t const * p_qspi_dev = p_context;
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;
    nrf_block_req_t * p_blk_left = &p_work->left_req;

    switch (p_work->state)
    {
        case NRF_BLOCK_DEV_QSPI_STATE_READ_EXEC:
        {
            if (p_work->writeback_mode)
            {
                /* The cached erase unit may hold newer data than the flash read. */
                block_dev_qspi_read_from_eunit(p_qspi_dev);
            }
            p_work->state = NRF_BLOCK_DEV_QSPI_STATE_IDLE;
            if (p_work->ev_handler)
            {
                const nrf_block_dev_event_t ev = {
                    NRF_BLOCK_DEV_EVT_BLK_READ_DONE,
                    NRF_BLOCK_DEV_RESULT_SUCCESS,
                    &p_work->req,
                    p_work->p_context
                };
                p_work->ev_handler(&p_qspi_dev->block_dev, &ev);
            }
            break;
        }
        case NRF_BLOCK_DEV_QSPI_STATE_EUNIT_LOAD:
        {
            /* The erase unit was read into RAM; merge the pending blocks into it. */
            ret_code_t ret;
            uint32_t erase_unit = BD_BLOCK_TO_ERASEUNIT(p_blk_left->blk_id,
                                                        p_work->geometry.blk_size);
            UNUSED_VARIABLE(erase_unit);
            ASSERT(erase_unit == p_work->erase_unit_idx);

            /* Check if block is in erase unit buffer*/
            ret = block_dev_qspi_eunit_write(p_qspi_dev, p_blk_left);
            ASSERT(ret == NRF_SUCCESS);
            UNUSED_VARIABLE(ret);
            break;
        }
        case NRF_BLOCK_DEV_QSPI_STATE_WRITE_ERASE:
        case NRF_BLOCK_DEV_QSPI_STATE_WRITE_EXEC:
        {
            /*Clear last programmed block*/
            /* __CLZ(__RBIT(x)) yields the index of the lowest set bit. */
            uint32_t block_to_program = __CLZ(__RBIT(p_work->erase_unit_dirty_blocks));
            if (p_work->state == NRF_BLOCK_DEV_QSPI_STATE_WRITE_EXEC)
            {
                p_work->erase_unit_dirty_blocks ^= 1u << block_to_program;
            }

            if (p_work->erase_unit_dirty_blocks == 0)
            {
                if (p_work->left_req.blk_count)
                {
                    /*Load next erase unit*/
                    ret_code_t ret;
                    uint32_t eunit = BD_BLOCK_TO_ERASEUNIT(p_blk_left->blk_id,
                                                           p_work->geometry.blk_size);
                    p_work->erase_unit_idx = eunit;
                    p_work->state = NRF_BLOCK_DEV_QSPI_STATE_EUNIT_LOAD;
                    ret = nrf_drv_qspi_read(p_work->p_erase_unit_buff,
                                            NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE,
                                            p_work->erase_unit_idx *
                                            NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE);
                    UNUSED_VARIABLE(ret);
                    break;
                }

                /*All blocks are programmed. Call event handler if required.*/
                p_work->state = NRF_BLOCK_DEV_QSPI_STATE_IDLE;
                /* Internal cache-flush writes complete silently. */
                if (p_work->ev_handler && !p_work->cache_flushing)
                {
                    const nrf_block_dev_event_t ev = {
                        NRF_BLOCK_DEV_EVT_BLK_WRITE_DONE,
                        NRF_BLOCK_DEV_RESULT_SUCCESS,
                        &p_work->req,
                        p_work->p_context
                    };
                    p_work->ev_handler(&p_qspi_dev->block_dev, &ev);
                }
                p_work->cache_flushing = false;
                break;
            }

            /*Get next block to program from program mask*/
            block_to_program = __CLZ(__RBIT(p_work->erase_unit_dirty_blocks));
            uint32_t dst_address = (p_work->erase_unit_idx * NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE) +
                                   (block_to_program * p_work->geometry.blk_size);

            const void * p_src_address = p_work->p_erase_unit_buff +
                                         block_to_program * p_work->geometry.blk_size;
            p_work->state = NRF_BLOCK_DEV_QSPI_STATE_WRITE_EXEC;
            ret_code_t ret = nrf_drv_qspi_write(p_src_address,
                                                p_work->geometry.blk_size,
                                                dst_address);
            UNUSED_VARIABLE(ret);
            break;
        }
        default:
            ASSERT(0);
            break;
    }
}
/**
 * @brief Blocks until the state machine returns to IDLE.
 *
 * Used by the synchronous (no event handler) code paths.
 */
static void wait_for_idle(nrf_block_dev_qspi_t const * p_qspi_dev)
{
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;
    for (;;)
    {
        if (p_work->state == NRF_BLOCK_DEV_QSPI_STATE_IDLE)
        {
            break;
        }
        /* Sleep until the next interrupt; the QSPI IRQ advances the state. */
        __WFI();
    }
}
/**
 * @brief Initializes the QSPI block device.
 *
 * Validates the configured block size against the flash page and erase
 * unit sizes, claims the single QSPI instance slot, configures the
 * peripheral, resets the flash (RSTEN + RST), reads the 3-byte JEDEC ID
 * and validates it against the known serial-flash parameter table, then
 * computes the device geometry and optionally fires the INIT event.
 *
 * NOTE(review): error returns after nrf_drv_qspi_init() succeed leave the
 * QSPI driver initialized — confirm callers tolerate this or consider
 * uninitializing on failure.
 *
 * @param[in] p_blk_dev  Block device handle
 * @param[in] ev_handler Event handler (NULL selects synchronous mode)
 * @param[in] p_context  Context passed to the event handler
 *
 * @return NRF_SUCCESS, NRF_ERROR_BUSY if another instance is active,
 *         NRF_ERROR_NOT_SUPPORTED for unsupported geometry or flash,
 *         or an error code from the QSPI driver.
 */
static ret_code_t block_dev_qspi_init(nrf_block_dev_t const * p_blk_dev,
                                      nrf_block_dev_ev_handler ev_handler,
                                      void const * p_context)
{
    ASSERT(p_blk_dev);
    nrf_block_dev_qspi_t const * p_qspi_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_qspi_t, block_dev);
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;
    nrf_drv_qspi_config_t const * p_qspi_cfg = &p_qspi_dev->qspi_bdev_config.qspi_config;
    ret_code_t ret = NRF_SUCCESS;

    /* Block size must be a multiple of the flash page program size... */
    if (p_qspi_dev->qspi_bdev_config.block_size % BD_PAGE_PROGRAM_SIZE)
    {
        /*Unsupported block size*/
        return NRF_ERROR_NOT_SUPPORTED;
    }

    /* ...and must evenly divide the erase unit. */
    if (NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE % p_qspi_dev->qspi_bdev_config.block_size)
    {
        /*Unsupported block size*/
        return NRF_ERROR_NOT_SUPPORTED;
    }

    if (m_active_qspi_dev)
    {
        /* QSPI instance is BUSY*/
        return NRF_ERROR_BUSY;
    }

    ret = nrf_drv_qspi_init(p_qspi_cfg, qspi_handler, (void *)p_blk_dev);
    if (ret != NRF_SUCCESS)
    {
        return ret;
    }

    nrf_qspi_cinstr_conf_t cinstr_cfg = {
        .opcode = QSPI_STD_CMD_RSTEN,
        .length = NRF_QSPI_CINSTR_LEN_1B,
        .io2_level = true,
        .io3_level = true,
        .wipwait = true,
        .wren = true
    };

    /* Send reset enable */
    ret = nrf_drv_qspi_cinstr_xfer(&cinstr_cfg, NULL, NULL);
    if (ret != NRF_SUCCESS)
    {
        return ret;
    }

    /* Send reset command */
    cinstr_cfg.opcode = QSPI_STD_CMD_RST;
    ret = nrf_drv_qspi_cinstr_xfer(&cinstr_cfg, NULL, NULL);
    if (ret != NRF_SUCCESS)
    {
        return ret;
    }

    /* Get 3 byte identification value */
    uint8_t rdid_buf[3] = {0, 0, 0};
    cinstr_cfg.opcode = QSPI_STD_CMD_READ_ID;
    cinstr_cfg.length = NRF_QSPI_CINSTR_LEN_4B;
    ret = nrf_drv_qspi_cinstr_xfer(&cinstr_cfg, NULL, rdid_buf);
    if (ret != NRF_SUCCESS)
    {
        return ret;
    }

    /* Look the JEDEC ID up in the known serial flash parameter table. */
    nrf_serial_flash_params_t const * serial_flash_id = nrf_serial_flash_params_get(rdid_buf);
    if (!serial_flash_id)
    {
        return NRF_ERROR_NOT_SUPPORTED;
    }

    /* The driver supports only flash whose erase size matches the compiled-in unit. */
    if (serial_flash_id->erase_size != NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE)
    {
        return NRF_ERROR_NOT_SUPPORTED;
    }

    /* Calculate block device geometry.... */
    uint32_t blk_size = p_qspi_dev->qspi_bdev_config.block_size;
    uint32_t blk_count = serial_flash_id->size / p_qspi_dev->qspi_bdev_config.block_size;

    if (!blk_count || (blk_count % BD_BLOCKS_PER_ERASEUNIT(blk_size)))
    {
        return NRF_ERROR_NOT_SUPPORTED;
    }

    p_work->geometry.blk_size = blk_size;
    p_work->geometry.blk_count = blk_count;
    p_work->p_context = p_context;
    p_work->ev_handler = ev_handler;
    p_work->state = NRF_BLOCK_DEV_QSPI_STATE_IDLE;
    p_work->erase_unit_idx = BD_ERASE_UNIT_INVALID_ID;
    p_work->writeback_mode = (p_qspi_dev->qspi_bdev_config.flags &
                              NRF_BLOCK_DEV_QSPI_FLAG_CACHE_WRITEBACK) != 0;

    /* Claim the singleton slot. */
    m_active_qspi_dev = p_qspi_dev;

    if (p_work->ev_handler)
    {
        /*Asynchronous operation (simulation)*/
        const nrf_block_dev_event_t ev = {
            NRF_BLOCK_DEV_EVT_INIT,
            NRF_BLOCK_DEV_RESULT_SUCCESS,
            NULL,
            p_work->p_context
        };
        p_work->ev_handler(p_blk_dev, &ev);
    }

    return NRF_SUCCESS;
}
/**
 * @brief Un-initializes the QSPI block device.
 *
 * Refuses while another instance owns the peripheral or while an
 * asynchronous operation is still in flight. Fires the UNINIT event
 * (if a handler is registered) before tearing down the driver, then
 * clears the work area and releases the singleton slot.
 *
 * @param[in] p_blk_dev Block device handle
 *
 * @return NRF_SUCCESS or NRF_ERROR_BUSY
 */
static ret_code_t block_dev_qspi_uninit(nrf_block_dev_t const * p_blk_dev)
{
    ASSERT(p_blk_dev);
    nrf_block_dev_qspi_t const * p_qspi_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_qspi_t, block_dev);
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;

    if (m_active_qspi_dev != p_qspi_dev)
    {
        /* QSPI instance is BUSY*/
        return NRF_ERROR_BUSY;
    }

    if (p_work->state != NRF_BLOCK_DEV_QSPI_STATE_IDLE)
    {
        /* Previous asynchronous operation in progress*/
        return NRF_ERROR_BUSY;
    }

    if (p_work->ev_handler)
    {
        /*Asynchronous operation*/
        const nrf_block_dev_event_t ev = {
            NRF_BLOCK_DEV_EVT_UNINIT,
            NRF_BLOCK_DEV_RESULT_SUCCESS,
            NULL,
            p_work->p_context
        };
        p_work->ev_handler(p_blk_dev, &ev);
    }

    p_work->state = NRF_BLOCK_DEV_QSPI_STATE_DISABLED;
    nrf_drv_qspi_uninit();
    /* Clear all runtime state and release the singleton slot. */
    memset(p_work, 0, sizeof(nrf_block_dev_qspi_work_t));
    m_active_qspi_dev = NULL;
    return NRF_SUCCESS;
}
/**
 * @brief Starts a block read request on the QSPI block device.
 *
 * Rejects the request while another instance owns the QSPI peripheral or a
 * previous asynchronous operation is still in progress. In synchronous mode
 * (no event handler registered) the call blocks until the transfer completes.
 *
 * @param p_blk_dev Block device handle.
 * @param p_blk     Block read request descriptor.
 *
 * @return NRF_SUCCESS, NRF_ERROR_BUSY, or an nrf_drv_qspi_read() error code.
 */
static ret_code_t block_dev_qspi_read_req(nrf_block_dev_t const * p_blk_dev,
                                          nrf_block_req_t const * p_blk)
{
    ASSERT(p_blk_dev);
    ASSERT(p_blk);
    nrf_block_dev_qspi_t const * p_qspi_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_qspi_t, block_dev);
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;
    ret_code_t ret = NRF_SUCCESS;

    if (m_active_qspi_dev != p_qspi_dev)
    {
        /* QSPI instance is BUSY*/
        return NRF_ERROR_BUSY;
    }

    if (p_work->state != NRF_BLOCK_DEV_QSPI_STATE_IDLE)
    {
        /* Previous asynchronous operation in progress*/
        return NRF_ERROR_BUSY;
    }

    /* Remember the original request and the not-yet-transferred remainder. */
    p_work->left_req = *p_blk;
    p_work->req = *p_blk;
    nrf_block_req_t * p_blk_left = &p_work->left_req;

    p_work->state = NRF_BLOCK_DEV_QSPI_STATE_READ_EXEC;
    /* The whole request is issued as one QSPI transfer; block id/count are
     * converted from blocks to a byte offset and byte length. */
    ret = nrf_drv_qspi_read(p_blk_left->p_buff,
                            p_blk_left->blk_count * p_work->geometry.blk_size,
                            p_blk_left->blk_id * p_work->geometry.blk_size);
    if (ret != NRF_SUCCESS)
    {
        p_work->state = NRF_BLOCK_DEV_QSPI_STATE_IDLE;
        return ret;
    }

    /* Everything was handed to the driver in a single transfer: nothing is
     * left to schedule. */
    p_blk_left->p_buff = NULL;
    p_blk_left->blk_count = 0;

    if (!p_work->ev_handler && (p_work->state != NRF_BLOCK_DEV_QSPI_STATE_IDLE))
    {
        /*Synchronous operation*/
        wait_for_idle(p_qspi_dev);
    }

    return ret;
}
/**
 * @brief Merges data into the cached erase-unit buffer and determines whether
 *        a flash erase is required before programming.
 *
 * Every 32-bit word that differs from the cached content marks its block as
 * dirty in @c erase_unit_dirty_blocks. An erase is required when a changed
 * word is not in the erased state (@ref BD_ERASE_UNIT_ERASE_VAL), since flash
 * programming can only clear bits.
 *
 * @param p_qspi_dev QSPI block device.
 * @param off        Byte offset inside the erase-unit buffer (word aligned).
 * @param p_src      Source data (word aligned).
 * @param len        Number of bytes to merge; must be a multiple of 4.
 *
 * @return true when the erase unit has to be erased before programming.
 */
static bool block_dev_qspi_update_eunit(nrf_block_dev_qspi_t const * p_qspi_dev,
                                        size_t off,
                                        const void * p_src,
                                        size_t len)
{
    ASSERT((len % sizeof(uint32_t)) == 0);  /* fixed: missing ';' after ASSERT */

    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;
    uint32_t * p_dst32 = (uint32_t *)(p_work->p_erase_unit_buff + off);
    const uint32_t * p_src32 = p_src;
    bool erase_required = false;

    /* A guarded loop (instead of the previous do/while) makes len == 0 a
     * safe no-op rather than a size_t underflow that would copy ~SIZE_MAX/4
     * words past the buffer. */
    for (len /= sizeof(uint32_t); len != 0; --len)
    {
        if (*p_dst32 != *p_src32)
        {
            if (*p_dst32 != BD_ERASE_UNIT_ERASE_VAL)
            {
                erase_required = true;
            }
            /*Mark block as dirty*/
            p_work->erase_unit_dirty_blocks |= 1u << (off / p_work->geometry.blk_size);
        }
        *p_dst32++ = *p_src32++;
        off += sizeof(uint32_t);
    }
    return erase_required;
}
/**
 * @brief Starts writing the cached erase unit back to flash.
 *
 * When no erase is pending, programs the lowest-numbered dirty block taken
 * from @c erase_unit_dirty_blocks. Otherwise issues a 4 kB erase of the unit
 * and clears the pending-erase flag first (programming of the dirty blocks
 * presumably resumes from the driver event handler after the erase - handler
 * not in view here).
 *
 * @param p_qspi_dev QSPI block device.
 *
 * @return Result code of the started QSPI program/erase operation.
 */
static ret_code_t block_dev_qspi_write_start(nrf_block_dev_qspi_t const * p_qspi_dev)
{
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;
    if (!p_work->erase_required)
    {
        /*Get first block to program from program mask*/
        /* __CLZ(__RBIT(x)) is the index of the least significant set bit. */
        uint32_t block_to_program = __CLZ(__RBIT(p_work->erase_unit_dirty_blocks));
        uint32_t dst_address = (p_work->erase_unit_idx * NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE) +
                               (block_to_program * p_work->geometry.blk_size);
        const void * p_src_address = p_work->p_erase_unit_buff +
                                     block_to_program * p_work->geometry.blk_size;
        p_work->state = NRF_BLOCK_DEV_QSPI_STATE_WRITE_EXEC;
        return nrf_drv_qspi_write(p_src_address,
                                  p_work->geometry.blk_size,
                                  dst_address);
    }
    /*Erase is required*/
    /* NRF_QSPI_ERASE_LEN_4KB matches NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE (4096). */
    uint32_t address = (p_work->erase_unit_idx * NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE);
    p_work->state = NRF_BLOCK_DEV_QSPI_STATE_WRITE_ERASE;
    p_work->erase_required = false;
    return nrf_drv_qspi_erase(NRF_QSPI_ERASE_LEN_4KB, address);
}
/**
 * @brief Merges one fragment of a write request into the cached erase unit
 *        and starts programming/erasing when required.
 *
 * Consumes up to one erase unit's worth of blocks from @p p_blk_left and
 * advances the descriptor past them. When an erase turns out to be needed,
 * all blocks of the unit are marked dirty so the whole unit is rewritten.
 *
 * @param p_qspi_dev QSPI block device.
 * @param p_blk_left Remaining part of the write request (updated in place).
 *
 * @return NRF_SUCCESS or the result of the started flash operation.
 */
static ret_code_t block_dev_qspi_eunit_write(nrf_block_dev_qspi_t const * p_qspi_dev,
                                             nrf_block_req_t * p_blk_left)
{
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;
    /* Position of the request's first block inside the erase unit, and how
     * many of its blocks fall into this unit. */
    size_t blk = p_blk_left->blk_id %
                 BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size);
    size_t cnt = BD_BLOCKS_PER_ERASEUNIT(p_work->geometry.blk_size) - blk;
    size_t off = p_work->geometry.blk_size * blk;
    if (cnt > p_blk_left->blk_count)
    {
        cnt = p_blk_left->blk_count;
    }
    bool erase_required = block_dev_qspi_update_eunit(p_qspi_dev,
                                                      off,
                                                      p_blk_left->p_buff,
                                                      cnt * p_work->geometry.blk_size);
    if (erase_required)
    {
        p_work->erase_required = true;
    }
    /* Advance the remaining-request descriptor past the consumed blocks. */
    p_blk_left->blk_count -= cnt;
    p_blk_left->blk_id += cnt;
    p_blk_left->p_buff = (uint8_t *)p_blk_left->p_buff + cnt * p_work->geometry.blk_size;
    if (p_work->erase_required)
    {
        /* After an erase the whole unit must be reprogrammed: mark every
         * block of the unit as dirty. */
        uint32_t blk_size = p_work->geometry.blk_size;
        p_work->erase_unit_dirty_blocks |= (1u << BD_BLOCKS_PER_ERASEUNIT(blk_size)) - 1;
    }
    if (p_work->erase_unit_dirty_blocks == 0 || p_work->writeback_mode)
    {
        /*No dirty blocks detected. Write end.*/
        /* In write-back mode the flash flush is deferred to an explicit
         * CACHE_FLUSH ioctl, so a fully-merged request completes here too. */
        if (p_work->ev_handler && p_blk_left->blk_count == 0)
        {
            const nrf_block_dev_event_t ev = {
                NRF_BLOCK_DEV_EVT_BLK_WRITE_DONE,
                NRF_BLOCK_DEV_RESULT_SUCCESS,
                &p_work->req,
                p_work->p_context
            };
            p_work->state = NRF_BLOCK_DEV_QSPI_STATE_IDLE;
            p_work->ev_handler(&p_qspi_dev->block_dev, &ev);
            return NRF_SUCCESS;
        }
    }
    return block_dev_qspi_write_start(p_qspi_dev);
}
/**
 * @brief Starts a block write request on the QSPI block device.
 *
 * Writes are staged through a one-erase-unit RAM cache. A request that hits
 * the currently cached erase unit is merged into the cache directly. On a
 * cache miss, write-back mode flushes the cached unit first, while the
 * default mode loads the target erase unit into the cache (EUNIT_LOAD) and
 * continues asynchronously. In synchronous mode (no event handler) the call
 * blocks until the operation finishes.
 *
 * @param p_blk_dev Block device handle.
 * @param p_blk     Block write request descriptor.
 *
 * @return NRF_SUCCESS, NRF_ERROR_BUSY, or a driver error code.
 */
static ret_code_t block_dev_qspi_write_req(nrf_block_dev_t const * p_blk_dev,
                                           nrf_block_req_t const * p_blk)
{
    ASSERT(p_blk_dev);
    ASSERT(p_blk);
    nrf_block_dev_qspi_t const * p_qspi_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_qspi_t, block_dev);
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;
    ret_code_t ret = NRF_SUCCESS;

    if (m_active_qspi_dev != p_qspi_dev)
    {
        /* QSPI instance is BUSY*/
        return NRF_ERROR_BUSY;
    }
    if (p_work->state != NRF_BLOCK_DEV_QSPI_STATE_IDLE)
    {
        /* Previous asynchronous operation in progress*/
        return NRF_ERROR_BUSY;
    }

    /* Remember the original request and the not-yet-written remainder. */
    p_work->left_req = *p_blk;
    p_work->req = *p_blk;
    nrf_block_req_t * p_blk_left = &p_work->left_req;

    uint32_t erase_unit = BD_BLOCK_TO_ERASEUNIT(p_blk_left->blk_id,
                                                p_work->geometry.blk_size);

    /* Check if block is in erase unit buffer*/
    if (erase_unit == p_work->erase_unit_idx)
    {
        /* Cache hit: merge into the cached erase unit. */
        ret = block_dev_qspi_eunit_write(p_qspi_dev, p_blk_left);
    }
    else
    {
        if (p_work->writeback_mode)
        {
            /* Cache miss in write-back mode: flush the cached unit first. */
            ret = block_dev_qspi_write_start(p_qspi_dev);
        }
        else
        {
            /* Cache miss: load the target erase unit into the cache. */
            p_work->erase_unit_idx = erase_unit;
            p_work->state = NRF_BLOCK_DEV_QSPI_STATE_EUNIT_LOAD;
            ret = nrf_drv_qspi_read(p_work->p_erase_unit_buff,
                                    NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE,
                                    erase_unit * NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE);
        }
    }

    if (ret != NRF_SUCCESS)
    {
        p_work->state = NRF_BLOCK_DEV_QSPI_STATE_IDLE;
        return ret;
    }

    if (!p_work->ev_handler && (p_work->state != NRF_BLOCK_DEV_QSPI_STATE_IDLE))
    {
        /*Synchronous operation*/
        wait_for_idle(p_qspi_dev);
    }

    return ret;
}
/**
 * @brief Handles IOCTL requests on the QSPI block device.
 *
 * Supports CACHE_FLUSH (starts writing the cached erase unit back in
 * write-back mode) and INFO_STRINGS (returns the device info strings).
 *
 * @param p_blk_dev Block device handle.
 * @param req       IOCTL request code.
 * @param p_data    Request-specific in/out data.
 *
 * @return NRF_SUCCESS, NRF_ERROR_BUSY, NRF_ERROR_INVALID_PARAM,
 *         or NRF_ERROR_NOT_SUPPORTED for unknown requests.
 */
static ret_code_t block_dev_qspi_ioctl(nrf_block_dev_t const * p_blk_dev,
                                       nrf_block_dev_ioctl_req_t req,
                                       void * p_data)
{
    ASSERT(p_blk_dev);
    nrf_block_dev_qspi_t const * p_qspi_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_qspi_t, block_dev);
    nrf_block_dev_qspi_work_t * p_work = p_qspi_dev->p_work;

    if (req == NRF_BLOCK_DEV_IOCTL_REQ_CACHE_FLUSH)
    {
        bool * p_flushing = p_data;

        if (p_work->state != NRF_BLOCK_DEV_QSPI_STATE_IDLE)
        {
            return NRF_ERROR_BUSY;
        }

        /* Nothing to do unless write-back caching is on and dirty blocks
         * are pending. */
        bool flush_needed = p_work->writeback_mode &&
                            (p_work->erase_unit_dirty_blocks != 0);
        if (!flush_needed)
        {
            if (p_flushing != NULL)
            {
                *p_flushing = false;
            }
            return NRF_SUCCESS;
        }

        ret_code_t ret = block_dev_qspi_write_start(p_qspi_dev);
        if (ret == NRF_SUCCESS)
        {
            if (p_flushing != NULL)
            {
                *p_flushing = true;
            }
            p_work->cache_flushing = true;
        }
        return ret;
    }

    if (req == NRF_BLOCK_DEV_IOCTL_REQ_INFO_STRINGS)
    {
        if (p_data == NULL)
        {
            return NRF_ERROR_INVALID_PARAM;
        }
        nrf_block_dev_info_strings_t const * * pp_strings = p_data;
        *pp_strings = &p_qspi_dev->info_strings;
        return NRF_SUCCESS;
    }

    return NRF_ERROR_NOT_SUPPORTED;
}
/**
 * @brief Returns the geometry (block size/count) of the QSPI block device.
 *
 * @param p_blk_dev Block device handle.
 *
 * @return Pointer to the geometry stored in the work structure.
 */
static nrf_block_dev_geometry_t const * block_dev_qspi_geometry(nrf_block_dev_t const * p_blk_dev)
{
    ASSERT(p_blk_dev);
    nrf_block_dev_qspi_t const * p_qspi_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_qspi_t, block_dev);
    return &p_qspi_dev->p_work->geometry;
}
/* Public block device API vtable of the QSPI implementation; referenced by
 * NRF_BLOCK_DEV_QSPI_DEFINE() in the header. */
const nrf_block_dev_ops_t nrf_block_device_qspi_ops = {
    .init = block_dev_qspi_init,
    .uninit = block_dev_qspi_uninit,
    .read_req = block_dev_qspi_read_req,
    .write_req = block_dev_qspi_write_req,
    .ioctl = block_dev_qspi_ioctl,
    .geometry = block_dev_qspi_geometry,
};
/** @} */

View File

@@ -0,0 +1,173 @@
/**
* Copyright (c) 2016 - 2017, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef NRF_BLOCK_DEV_QSPI_H__
#define NRF_BLOCK_DEV_QSPI_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "nrf_block_dev.h"
#include "nrf_drv_qspi.h"
/**@file
*
* @defgroup nrf_block_dev_qspi QSPI implementation
* @ingroup nrf_block_dev
* @{
*
*/
/**
* @brief QSPI block device operations
* */
extern const nrf_block_dev_ops_t nrf_block_device_qspi_ops;
/**
* @brief QSPI block device internal erase unit buffer size
* */
#define NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE (4096)
/**
 * @brief Internal block device state (driven by the request/event machinery).
 */
typedef enum {
    NRF_BLOCK_DEV_QSPI_STATE_DISABLED = 0, /**< QSPI block device state DISABLED (not initialized) */
    NRF_BLOCK_DEV_QSPI_STATE_IDLE,         /**< QSPI block device state IDLE (ready for a request) */
    NRF_BLOCK_DEV_QSPI_STATE_READ_EXEC,    /**< QSPI block device state READ_EXEC (read transfer in progress) */
    NRF_BLOCK_DEV_QSPI_STATE_EUNIT_LOAD,   /**< QSPI block device state EUNIT_LOAD (loading erase unit into RAM cache) */
    NRF_BLOCK_DEV_QSPI_STATE_WRITE_ERASE,  /**< QSPI block device state WRITE_ERASE (flash erase in progress) */
    NRF_BLOCK_DEV_QSPI_STATE_WRITE_EXEC,   /**< QSPI block device state WRITE_EXEC (flash program in progress) */
} nrf_block_dev_qspi_state_t;

/**
 * @brief Work structure of QSPI block device
 */
typedef struct {
    volatile nrf_block_dev_qspi_state_t state; //!< QSPI block device state (volatile: updated asynchronously)
    nrf_block_dev_geometry_t geometry;         //!< Block device geometry
    nrf_block_dev_ev_handler ev_handler;       //!< Block device event handler (NULL => synchronous operation)
    void const * p_context;                    //!< Context handle passed to event handler
    nrf_block_req_t req;                       //!< Block READ/WRITE request: original value
    nrf_block_req_t left_req;                  //!< Block READ/WRITE request: left (not yet processed) value
    bool cache_flushing;                       //!< QSPI cache flush in progress flag
    bool writeback_mode;                       //!< QSPI write-back mode flag
    bool erase_required;                       //!< QSPI erase required flag
    uint32_t erase_unit_idx;                   //!< QSPI erase unit index currently held in the cache buffer
    uint32_t erase_unit_dirty_blocks;          //!< QSPI erase unit dirty blocks mask (bit per block)
    uint8_t p_erase_unit_buff[NRF_BLOCK_DEV_QSPI_ERASE_UNIT_SIZE]; //!< QSPI erase unit cache buffer (fixed size)
} nrf_block_dev_qspi_work_t;

/**
 * @brief QSPI block device flags*/
typedef enum {
    NRF_BLOCK_DEV_QSPI_FLAG_CACHE_WRITEBACK = (1u << 0) //!< Cache write-back mode enable flag
} nrf_block_dev_qspi_flag_t;
/**
 * @brief QSPI block device config initializer (@ref nrf_block_dev_qspi_config_t)
 *
 * @param blk_size        Block size
 * @param blk_flags       Block device flags, @ref nrf_block_dev_qspi_flag_t
 * @param qspi_drv_config QSPI driver config
 * */
#define NRF_BLOCK_DEV_QSPI_CONFIG(blk_size, blk_flags, qspi_drv_config) { \
    .block_size = (blk_size), \
    .flags = (blk_flags), \
    .qspi_config = qspi_drv_config \
}

/**
 * @brief QSPI block device config
 */
typedef struct {
    uint32_t block_size;               //!< Desired block size
    uint32_t flags;                    //!< QSPI block device flags
    nrf_drv_qspi_config_t qspi_config; //!< QSPI configuration
} nrf_block_dev_qspi_config_t;

/**
 * @brief QSPI block device
 * */
typedef struct {
    nrf_block_dev_t block_dev;                 //!< Block device (generic API struct)
    nrf_block_dev_info_strings_t info_strings; //!< Block device information strings
    nrf_block_dev_qspi_config_t qspi_bdev_config; //!< QSPI block device config
    nrf_block_dev_qspi_work_t * p_work;        //!< QSPI block device work structure
} nrf_block_dev_qspi_t;

/**
 * @brief Defines a QSPI block device.
 *
 * @param name   Instance name
 * @param config Configuration @ref nrf_block_dev_qspi_config_t
 * @param info   Info strings @ref NRF_BLOCK_DEV_INFO_CONFIG
 * */
#define NRF_BLOCK_DEV_QSPI_DEFINE(name, config, info) \
    static nrf_block_dev_qspi_work_t CONCAT_2(name, _work); \
    static const nrf_block_dev_qspi_t name = { \
        .block_dev = { .p_ops = &nrf_block_device_qspi_ops }, \
        .info_strings = BRACKET_EXTRACT(info), \
        .qspi_bdev_config = config, \
        .p_work = &CONCAT_2(name, _work), \
    }
/**
 * @brief Returns the generic block device API handle of a QSPI block device.
 *
 * @param[in] p_blk_qspi QSPI block device
 * @return Block device handle
 */
static inline nrf_block_dev_t const *
nrf_block_dev_qspi_ops_get(nrf_block_dev_qspi_t const * p_blk_qspi)
{
    /* The generic block device struct is embedded in the QSPI device. */
    return &p_blk_qspi->block_dev;
}
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* NRF_BLOCK_DEV_QSPI_H__ */

View File

@@ -0,0 +1,66 @@
/**
* Copyright (c) 2016 - 2017, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "nrf_serial_flash_params.h"
/* Table of known serial flash memories, matched by the 3-byte JEDEC
 * Read-ID (command 0x9F) value. */
static const nrf_serial_flash_params_t m_sflash_params[] = {
    { /*MXIC MX25R6435F*/
        .read_id = { 0xC2, 0x28, 0x17 },
        .capabilities = 0x00,
        .size = 8 * 1024 * 1024,   /* 64 Mbit */
        .erase_size = 4 * 1024,
        .program_size = 256,
    }
};
nrf_serial_flash_params_t const * nrf_serial_flash_params_get(const uint8_t * p_read_id)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(m_sflash_params); ++i)
{
if (memcmp(m_sflash_params[i].read_id, p_read_id, sizeof(m_sflash_params[i].read_id)) == 0)
{
return &m_sflash_params[i];
}
}
return NULL;
}

View File

@@ -0,0 +1,84 @@
/**
* Copyright (c) 2016 - 2017, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef NRF_SERIAL_FLASH_PARAMS_H__
#define NRF_SERIAL_FLASH_PARAMS_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "sdk_common.h"
/**@file
*
* @defgroup nrf_serial_flash_params Serial flash memory parameters
* @ingroup nrf_block_dev
* @{
*
*/
/**
 * @brief Serial flash memory parameters
 * */
typedef struct {
    uint8_t read_id[3];    //!< Read identification command (0x9F) result
    uint8_t capabilities;  //!< Serial flash memory capabilities
    uint32_t size;         //!< Serial flash memory size (bytes)
    uint32_t erase_size;   //!< Serial flash memory erase unit size (bytes)
    uint32_t program_size; //!< Serial flash memory program size (bytes)
} nrf_serial_flash_params_t;

/**
 * @brief Returns serial flash memory identification descriptor
 *
 * @param p_read_params Memory read identification command (0x9F) result, 3 bytes
 *
 * @return Serial flash memory descriptor (NULL if not found)
 * */
nrf_serial_flash_params_t const * nrf_serial_flash_params_get(const uint8_t * p_read_params);
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* NRF_SERIAL_FLASH_PARAMS_H__ */

View File

@@ -0,0 +1,206 @@
/**
* Copyright (c) 2016 - 2017, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "nrf_block_dev_ram.h"
/**@file
*
* @ingroup nrf_block_dev
* @{
*
* @brief This module implements block device API. It should be used as a reference block device.
*/
/**
 * @brief Initializes the RAM block device.
 *
 * Derives the geometry from the configured backing buffer and, when an event
 * handler is given, simulates an asynchronous INIT completion event.
 *
 * @param p_blk_dev  Block device handle.
 * @param ev_handler Event handler (NULL for synchronous operation).
 * @param p_context  Context passed back to the event handler.
 *
 * @return Always NRF_SUCCESS.
 */
static ret_code_t block_dev_ram_init(nrf_block_dev_t const * p_blk_dev,
                                     nrf_block_dev_ev_handler ev_handler,
                                     void const * p_context)
{
    ASSERT(p_blk_dev);
    nrf_block_dev_ram_t const * p_ram_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_ram_t, block_dev);
    nrf_block_dev_ram_work_t * p_work = p_ram_dev->p_work;
    nrf_block_dev_ram_config_t const * p_cfg = &p_ram_dev->ram_config;

    /* Geometry follows directly from the configured buffer. */
    p_work->geometry.blk_size  = p_cfg->block_size;
    p_work->geometry.blk_count = p_cfg->size / p_cfg->block_size;
    p_work->p_context  = p_context;
    p_work->ev_handler = ev_handler;

    if (ev_handler != NULL)
    {
        /* Simulated asynchronous completion notification. */
        const nrf_block_dev_event_t ev = {
            NRF_BLOCK_DEV_EVT_INIT,
            NRF_BLOCK_DEV_RESULT_SUCCESS,
            NULL,
            p_context
        };
        ev_handler(p_blk_dev, &ev);
    }
    return NRF_SUCCESS;
}
/**
 * @brief Uninitializes the RAM block device.
 *
 * Delivers a simulated UNINIT event (when a handler is registered) and
 * clears all runtime state.
 *
 * @param p_blk_dev Block device handle.
 *
 * @return Always NRF_SUCCESS.
 */
static ret_code_t block_dev_ram_uninit(nrf_block_dev_t const * p_blk_dev)
{
    ASSERT(p_blk_dev);
    nrf_block_dev_ram_t const * p_ram_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_ram_t, block_dev);
    nrf_block_dev_ram_work_t * p_work = p_ram_dev->p_work;

    if (p_work->ev_handler != NULL)
    {
        /* Simulated asynchronous completion notification. */
        const nrf_block_dev_event_t ev = {
            NRF_BLOCK_DEV_EVT_UNINIT,
            NRF_BLOCK_DEV_RESULT_SUCCESS,
            NULL,
            p_work->p_context
        };
        p_work->ev_handler(p_blk_dev, &ev);
    }

    /* Drop all runtime state. */
    memset(p_work, 0, sizeof(*p_work));
    return NRF_SUCCESS;
}
/**
 * @brief Common READ/WRITE request handler for the RAM block device.
 *
 * Copies between the caller's buffer and the RAM backing buffer; the copy
 * direction is selected by @p event. Completion is synchronous, with an
 * optional simulated asynchronous event.
 *
 * @param p_blk_dev Block device handle.
 * @param p_blk     Block request descriptor.
 * @param event     NRF_BLOCK_DEV_EVT_BLK_READ_DONE or ..._WRITE_DONE.
 *
 * @return Always NRF_SUCCESS.
 */
static ret_code_t block_dev_ram_req(nrf_block_dev_t const * p_blk_dev,
                                    nrf_block_req_t const * p_blk,
                                    nrf_block_dev_event_type_t event)
{
    ASSERT(p_blk_dev);
    ASSERT(p_blk);
    nrf_block_dev_ram_t const * p_ram_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_ram_t, block_dev);
    nrf_block_dev_ram_work_t const * p_work = p_ram_dev->p_work;

    /* Address of the first affected block inside the backing buffer. */
    uint8_t * p_ram_blk = (uint8_t *)p_ram_dev->ram_config.p_work_buffer +
                          p_blk->blk_id * p_work->geometry.blk_size;
    size_t n_bytes = p_work->geometry.blk_size * p_blk->blk_count;

    /*Synchronous operation*/
    if (event == NRF_BLOCK_DEV_EVT_BLK_READ_DONE)
    {
        memcpy(p_blk->p_buff, p_ram_blk, n_bytes);
    }
    else
    {
        memcpy(p_ram_blk, p_blk->p_buff, n_bytes);
    }

    if (p_work->ev_handler)
    {
        /*Asynchronous operation (simulation)*/
        const nrf_block_dev_event_t ev = {
            event,
            NRF_BLOCK_DEV_RESULT_SUCCESS,
            p_blk,
            p_work->p_context
        };
        p_work->ev_handler(p_blk_dev, &ev);
    }
    return NRF_SUCCESS;
}
/* Read request: copy from the RAM backing buffer into the caller's buffer. */
static ret_code_t block_dev_ram_read_req(nrf_block_dev_t const * p_blk_dev,
                                         nrf_block_req_t const * p_blk)
{
    return block_dev_ram_req(p_blk_dev, p_blk, NRF_BLOCK_DEV_EVT_BLK_READ_DONE);
}
/* Write request: copy from the caller's buffer into the RAM backing buffer. */
static ret_code_t block_dev_ram_write_req(nrf_block_dev_t const * p_blk_dev,
                                          nrf_block_req_t const * p_blk)
{
    return block_dev_ram_req(p_blk_dev, p_blk, NRF_BLOCK_DEV_EVT_BLK_WRITE_DONE);
}
/**
 * @brief Handles IOCTL requests on the RAM block device.
 *
 * CACHE_FLUSH always reports "not flushing" (the RAM device has no cache);
 * INFO_STRINGS returns the device information strings.
 *
 * @param p_blk_dev Block device handle.
 * @param req       IOCTL request code.
 * @param p_data    Request-specific in/out data.
 *
 * @return NRF_SUCCESS, NRF_ERROR_INVALID_PARAM, or NRF_ERROR_NOT_SUPPORTED.
 */
static ret_code_t block_dev_ram_ioctl(nrf_block_dev_t const * p_blk_dev,
                                      nrf_block_dev_ioctl_req_t req,
                                      void * p_data)
{
    ASSERT(p_blk_dev);  /* added: consistent with every other handler here */
    nrf_block_dev_ram_t const * p_ram_dev = CONTAINER_OF(p_blk_dev, nrf_block_dev_ram_t, block_dev);
    switch (req)
    {
        case NRF_BLOCK_DEV_IOCTL_REQ_CACHE_FLUSH:
        {
            /* The RAM device has no cache: nothing to flush. */
            bool * p_flushing = p_data;
            if (p_flushing)
            {
                *p_flushing = false;
            }
            return NRF_SUCCESS;
        }
        case NRF_BLOCK_DEV_IOCTL_REQ_INFO_STRINGS:
        {
            if (p_data == NULL)
            {
                return NRF_ERROR_INVALID_PARAM;
            }
            nrf_block_dev_info_strings_t const * * pp_strings = p_data;
            *pp_strings = &p_ram_dev->info_strings;
            return NRF_SUCCESS;
        }
        default:
            break;
    }
    return NRF_ERROR_NOT_SUPPORTED;
}
/**
 * @brief Returns the geometry (block size/count) of the RAM block device.
 *
 * @param p_blk_dev Block device handle.
 *
 * @return Pointer to the geometry stored in the work structure.
 */
static nrf_block_dev_geometry_t const * block_dev_ram_geometry(nrf_block_dev_t const * p_blk_dev)
{
    ASSERT(p_blk_dev);
    nrf_block_dev_ram_t const * p_ram_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_ram_t, block_dev);
    return &p_ram_dev->p_work->geometry;
}
/* Public block device API vtable of the RAM implementation; referenced by
 * NRF_BLOCK_DEV_RAM_DEFINE() in the header. */
const nrf_block_dev_ops_t nrf_block_device_ram_ops = {
    .init = block_dev_ram_init,
    .uninit = block_dev_ram_uninit,
    .read_req = block_dev_ram_read_req,
    .write_req = block_dev_ram_write_req,
    .ioctl = block_dev_ram_ioctl,
    .geometry = block_dev_ram_geometry,
};
/** @} */

View File

@@ -0,0 +1,141 @@
/**
* Copyright (c) 2016 - 2017, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef NRF_BLOCK_DEV_RAM_H__
#define NRF_BLOCK_DEV_RAM_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "nrf_block_dev.h"
/**@file
*
* @defgroup nrf_block_dev_ram RAM implementation
* @ingroup nrf_block_dev
* @{
*
* @brief This module implements block device API. It should be used as a reference block device.
*/
/**
* @brief RAM block device operations
* */
extern const nrf_block_dev_ops_t nrf_block_device_ram_ops;
/**
 * @brief Work structure of RAM block device
 */
typedef struct {
    nrf_block_dev_geometry_t geometry; //!< Block device geometry
    nrf_block_dev_ev_handler ev_handler; //!< Block device event handler (NULL => synchronous operation)
    void const * p_context; //!< Context handle passed to event handler
} nrf_block_dev_ram_work_t;

/**
 * @brief RAM block device config initializer (@ref nrf_block_dev_ram_config_t)
 *
 * @param blk_size    Block size
 * @param buffer      RAM work buffer
 * @param buffer_size RAM work buffer size (bytes)
 * */
#define NRF_BLOCK_DEV_RAM_CONFIG(blk_size, buffer, buffer_size) { \
    .block_size = (blk_size), \
    .p_work_buffer = (buffer), \
    .size = (buffer_size), \
}

/**
 * @brief Ram block device config
 */
typedef struct {
    uint32_t block_size; //!< Desired block size
    void * p_work_buffer; //!< Ram work buffer (backing storage)
    size_t size; //!< Ram work buffer size (bytes)
} nrf_block_dev_ram_config_t;

/**
 * @brief Ram block device
 * */
typedef struct {
    nrf_block_dev_t block_dev; //!< Block device (generic API struct)
    nrf_block_dev_info_strings_t info_strings; //!< Block device information strings
    nrf_block_dev_ram_config_t ram_config; //!< Ram block device config
    nrf_block_dev_ram_work_t * p_work; //!< Ram block device work structure
} nrf_block_dev_ram_t;

/**
 * @brief Defines a RAM block device.
 *
 * @param name   Instance name
 * @param config Configuration @ref nrf_block_dev_ram_config_t
 * @param info   Info strings @ref NRF_BLOCK_DEV_INFO_CONFIG
 * */
#define NRF_BLOCK_DEV_RAM_DEFINE(name, config, info) \
    static nrf_block_dev_ram_work_t CONCAT_2(name, _work); \
    static const nrf_block_dev_ram_t name = { \
        .block_dev = { .p_ops = &nrf_block_device_ram_ops }, \
        .info_strings = BRACKET_EXTRACT(info), \
        .ram_config = config, \
        .p_work = &CONCAT_2(name, _work), \
    }
/**
 * @brief Returns the generic block device API handle of a RAM block device.
 *
 * @param[in] p_blk_ram Ram block device
 * @return Block device handle
 */
static inline nrf_block_dev_t const *
nrf_block_dev_ram_ops_get(nrf_block_dev_ram_t const * p_blk_ram)
{
    /* The generic block device struct is embedded in the RAM device. */
    return &p_blk_ram->block_dev;
}
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* NRF_BLOCK_DEV_RAM_H__ */

View File

@@ -0,0 +1,393 @@
/**
* Copyright (c) 2016 - 2017, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "nrf_block_dev_sdc.h"
/**@file
*
* @ingroup nrf_block_dev_sdc
* @{
*
* @brief This module implements block device API. It should be used as a reference block device.
*/
/* Result of the most recent SDC driver event; volatile because it is written
 * from the driver event handler (NOTE(review): handler context - task or
 * interrupt - is not visible here; confirm against the app_sdc driver). */
static volatile sdc_result_t m_last_result;

/**
 * @brief Active SDC block device handle. Only one instance.
 * */
static nrf_block_dev_sdc_t const * m_active_sdc_dev;
/**
 * @brief Hook called on every iteration of the synchronous busy-wait loop.
 *
 * Intentionally empty; single extension point for adding a power-saving
 * wait (e.g. WFE) without changing sdc_wait() itself.
 */
static void wait_func(void)
{
}
/**
 * @brief Blocks until the SD card driver reports that it is idle.
 *
 * Polls app_sdc_busy_check() and calls wait_func() between polls.
 * Used to emulate synchronous operation when no event handler is set.
 */
static void sdc_wait()
{
    for (;;)
    {
        if (!app_sdc_busy_check())
        {
            break;
        }
        wait_func();
    }
}
/**
 * @brief Event handler registered with the app_sdcard library.
 *
 * Records the operation result for synchronous waiters (sdc_wait()) and
 * translates SDC events (init/read/write) into block device events that
 * are forwarded to the user's event handler, if one was registered.
 *
 * NOTE(review): assumes m_active_sdc_dev is non-NULL whenever the SDC
 * library delivers an event - it is set in block_dev_sdc_init() before
 * app_sdc_init() is called, so the handler cannot fire earlier.
 *
 * @param p_event SDC library event.
 */
static void sdc_handler(sdc_evt_t const * p_event)
{
/* Latched result is polled by sdc_wait() in synchronous mode. */
m_last_result = p_event->result;
nrf_block_dev_sdc_t const * p_sdc_dev = m_active_sdc_dev;
nrf_block_dev_sdc_work_t * p_work = p_sdc_dev->p_work;
switch (p_event->type)
{
case SDC_EVT_INIT:
{
/* Card parameters are known now - fill in the device geometry. */
p_work->geometry.blk_count = app_sdc_info_get()->num_blocks;
p_work->geometry.blk_size = SDC_SECTOR_SIZE;
if (m_active_sdc_dev->p_work->ev_handler)
{
const nrf_block_dev_event_t ev = {
NRF_BLOCK_DEV_EVT_INIT,
((p_event->result == SDC_SUCCESS) ? \
NRF_BLOCK_DEV_RESULT_SUCCESS : NRF_BLOCK_DEV_RESULT_IO_ERROR),
NULL,
p_work->p_context
};
p_work->ev_handler(&p_sdc_dev->block_dev, &ev);
}
}
break;
case SDC_EVT_READ:
/* Forward read completion (success or I/O error) to the user. */
if (m_active_sdc_dev->p_work->ev_handler)
{
const nrf_block_dev_event_t ev = {
NRF_BLOCK_DEV_EVT_BLK_READ_DONE,
((p_event->result == SDC_SUCCESS) ? \
NRF_BLOCK_DEV_RESULT_SUCCESS : NRF_BLOCK_DEV_RESULT_IO_ERROR),
&p_work->req,
p_work->p_context
};
p_work->ev_handler(&p_sdc_dev->block_dev, &ev);
}
break;
case SDC_EVT_WRITE:
/* Forward write completion (success or I/O error) to the user. */
if (m_active_sdc_dev->p_work->ev_handler)
{
const nrf_block_dev_event_t ev = {
NRF_BLOCK_DEV_EVT_BLK_WRITE_DONE,
((p_event->result == SDC_SUCCESS) ? \
NRF_BLOCK_DEV_RESULT_SUCCESS : NRF_BLOCK_DEV_RESULT_IO_ERROR),
&p_work->req,
p_work->p_context
};
p_work->ev_handler(&p_sdc_dev->block_dev, &ev);
}
break;
default:
/* Unknown event type - treated as an internal error. */
APP_ERROR_CHECK(NRF_ERROR_INTERNAL);
return;
}
}
/**
 * @brief Initializes the SDC block device.
 *
 * Claims the single global SDC slot (m_active_sdc_dev) and starts SD card
 * initialization. With no event handler the call is synchronous and blocks
 * until the card responds; with a handler it is asynchronous and completion
 * is reported via NRF_BLOCK_DEV_EVT_INIT.
 *
 * @param p_blk_dev Block device handle.
 * @param ev_handler User event handler; NULL selects synchronous mode.
 * @param p_context  User context passed back in events.
 *
 * @retval NRF_SUCCESS             Initialization started (async) or done (sync).
 * @retval NRF_ERROR_NOT_SUPPORTED Configured block size is not SDC_SECTOR_SIZE.
 * @retval NRF_ERROR_BUSY          Another SDC device instance is active.
 * @retval NRF_ERROR_TIMEOUT       Synchronous initialization failed.
 */
static ret_code_t block_dev_sdc_init(nrf_block_dev_t const * p_blk_dev,
nrf_block_dev_ev_handler ev_handler,
void const * p_context)
{
ASSERT(p_blk_dev);
nrf_block_dev_sdc_t const * p_sdc_dev =
CONTAINER_OF(p_blk_dev, nrf_block_dev_sdc_t, block_dev);
nrf_block_dev_sdc_work_t * p_work = p_sdc_dev->p_work;
if (p_sdc_dev->sdc_bdev_config.block_size != SDC_SECTOR_SIZE)
{
/* Unsupported block size. */
return NRF_ERROR_NOT_SUPPORTED;
}
if (m_active_sdc_dev)
{
/* SDC instance is busy. */
return NRF_ERROR_BUSY;
}
p_work->p_context = p_context;
p_work->ev_handler = ev_handler;
/* Claim the instance BEFORE app_sdc_init() so sdc_handler() always sees a
 * valid m_active_sdc_dev. */
m_active_sdc_dev = p_sdc_dev;
ret_code_t err_code = NRF_SUCCESS;
err_code = app_sdc_init(&p_sdc_dev->sdc_bdev_config.sdc_config, sdc_handler);
if (err_code == NRF_SUCCESS)
{
if (!ev_handler)
{
/* Synchronous mode - wait for the card. */
sdc_wait();
err_code = ((m_last_result == SDC_SUCCESS) ? NRF_SUCCESS : NRF_ERROR_TIMEOUT);
}
}
if (err_code != NRF_SUCCESS)
{
/* Roll back the claim on failure so the slot can be reused. */
m_active_sdc_dev = NULL;
if (ev_handler)
{
/* Call the user handler with an error status. */
const nrf_block_dev_event_t ev = {
NRF_BLOCK_DEV_EVT_INIT,
NRF_BLOCK_DEV_RESULT_IO_ERROR,
NULL,
p_work->p_context
};
p_work->ev_handler(p_blk_dev, &ev);
}
}
return err_code;
}
/**
 * @brief Uninitializes the SDC block device and releases the global slot.
 *
 * Synchronous operation; if an event handler is registered, the outcome is
 * additionally reported via NRF_BLOCK_DEV_EVT_UNINIT.
 *
 * @param p_blk_dev Block device handle.
 *
 * @retval NRF_SUCCESS    Device uninitialized; instance released.
 * @retval NRF_ERROR_BUSY Device not active, or an async operation is pending.
 */
static ret_code_t block_dev_sdc_uninit(nrf_block_dev_t const * p_blk_dev)
{
ASSERT(p_blk_dev);
nrf_block_dev_sdc_t const * p_sdc_dev =
CONTAINER_OF(p_blk_dev, nrf_block_dev_sdc_t, block_dev);
nrf_block_dev_sdc_work_t * p_work = p_sdc_dev->p_work;
if (m_active_sdc_dev != p_sdc_dev)
{
/* SDC instance is busy. */
return NRF_ERROR_BUSY;
}
if (app_sdc_busy_check())
{
/* Previous asynchronous operation in progress. */
return NRF_ERROR_BUSY;
}
ret_code_t err_code = app_sdc_uninit();
if (err_code == NRF_SUCCESS)
{
/* Free the instance on success. */
m_active_sdc_dev = NULL;
}
if (p_work->ev_handler)
{
/* SDC uninitialization is a synchronous operation. Call event handler. */
const nrf_block_dev_event_t ev = {
NRF_BLOCK_DEV_EVT_UNINIT,
((err_code == NRF_SUCCESS) ? \
NRF_BLOCK_DEV_RESULT_SUCCESS : NRF_BLOCK_DEV_RESULT_IO_ERROR),
NULL,
p_work->p_context
};
p_work->ev_handler(p_blk_dev, &ev);
}
return err_code;
}
/**
 * @brief Starts a block read request.
 *
 * Asynchronous when an event handler is registered (completion reported via
 * NRF_BLOCK_DEV_EVT_BLK_READ_DONE from sdc_handler()); otherwise blocks
 * until the transfer finishes.
 *
 * @param p_blk_dev Block device handle.
 * @param p_blk     Read request; copied into the work structure so it stays
 *                  valid for the duration of an asynchronous transfer.
 *
 * @retval NRF_SUCCESS       Request accepted (async) or completed (sync).
 * @retval NRF_ERROR_BUSY    Device not active, or an operation is pending.
 * @retval NRF_ERROR_TIMEOUT Synchronous transfer failed.
 */
static ret_code_t block_dev_sdc_read_req(nrf_block_dev_t const * p_blk_dev,
nrf_block_req_t const * p_blk)
{
ASSERT(p_blk_dev);
ASSERT(p_blk);
nrf_block_dev_sdc_t const * p_sdc_dev =
CONTAINER_OF(p_blk_dev, nrf_block_dev_sdc_t, block_dev);
nrf_block_dev_sdc_work_t * p_work = p_sdc_dev->p_work;
ret_code_t err_code = NRF_SUCCESS;
if (m_active_sdc_dev != p_sdc_dev)
{
/* SDC instance is busy. */
return NRF_ERROR_BUSY;
}
if (app_sdc_busy_check())
{
/* Previous asynchronous operation in progress. */
return NRF_ERROR_BUSY;
}
/* Keep a copy of the request for the completion event. */
p_work->req = *p_blk;
err_code = app_sdc_block_read(p_blk->p_buff, p_blk->blk_id, p_blk->blk_count);
if (err_code == NRF_SUCCESS)
{
if (!p_work->ev_handler)
{
/* Synchronous mode - wait for the card. */
sdc_wait();
err_code = ((m_last_result == SDC_SUCCESS) ? NRF_SUCCESS : NRF_ERROR_TIMEOUT);
}
}
if ((p_work->ev_handler) && (err_code != NRF_SUCCESS))
{
/* Call the user handler with an error status. */
const nrf_block_dev_event_t ev = {
NRF_BLOCK_DEV_EVT_BLK_READ_DONE,
NRF_BLOCK_DEV_RESULT_IO_ERROR,
&p_work->req,
p_work->p_context
};
p_work->ev_handler(p_blk_dev, &ev);
}
return err_code;
}
/**
 * @brief Starts a block write request.
 *
 * Asynchronous when an event handler is registered (completion reported via
 * NRF_BLOCK_DEV_EVT_BLK_WRITE_DONE from sdc_handler()); otherwise blocks
 * until the transfer finishes.
 *
 * @param p_blk_dev Block device handle.
 * @param p_blk     Write request; copied into the work structure so it stays
 *                  valid for the duration of an asynchronous transfer.
 *
 * @retval NRF_SUCCESS       Request accepted (async) or completed (sync).
 * @retval NRF_ERROR_BUSY    Device not active, or an operation is pending.
 * @retval NRF_ERROR_TIMEOUT Synchronous transfer failed.
 */
static ret_code_t block_dev_sdc_write_req(nrf_block_dev_t const * p_blk_dev,
nrf_block_req_t const * p_blk)
{
    ASSERT(p_blk_dev);
    ASSERT(p_blk);
    nrf_block_dev_sdc_t const * p_sdc_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_sdc_t, block_dev);
    nrf_block_dev_sdc_work_t * p_work = p_sdc_dev->p_work;
    ret_code_t err_code = NRF_SUCCESS;

    if (m_active_sdc_dev != p_sdc_dev)
    {
        /* SDC instance is busy. */
        return NRF_ERROR_BUSY;
    }
    if (app_sdc_busy_check())
    {
        /* Previous asynchronous operation in progress. */
        return NRF_ERROR_BUSY;
    }
    /* Keep a copy of the request for the completion event. */
    p_work->req = *p_blk;
    err_code = app_sdc_block_write(p_blk->p_buff, p_blk->blk_id, p_blk->blk_count);
    if (err_code == NRF_SUCCESS)
    {
        if (!p_work->ev_handler)
        {
            /* Synchronous mode - wait for the card. */
            sdc_wait();
            err_code = ((m_last_result == SDC_SUCCESS) ? NRF_SUCCESS : NRF_ERROR_TIMEOUT);
        }
    }
    if ((p_work->ev_handler) && (err_code != NRF_SUCCESS))
    {
        /* Call the user handler with an error status.
         * Fix: report BLK_WRITE_DONE for a failed WRITE request (was
         * mistakenly BLK_READ_DONE, a copy-paste from the read path),
         * matching the event emitted by sdc_handler() for SDC_EVT_WRITE. */
        const nrf_block_dev_event_t ev = {
            NRF_BLOCK_DEV_EVT_BLK_WRITE_DONE,
            NRF_BLOCK_DEV_RESULT_IO_ERROR,
            &p_work->req,
            p_work->p_context
        };
        p_work->ev_handler(p_blk_dev, &ev);
    }
    return err_code;
}
/**
 * @brief Handles IOCTL requests for the SDC block device.
 *
 * Supports cache flush queries (no write cache, so never flushing) and
 * retrieval of the device information strings.
 *
 * @param p_blk_dev Block device handle.
 * @param req       IOCTL request code.
 * @param p_data    Request-specific in/out data pointer.
 *
 * @retval NRF_SUCCESS             Request handled.
 * @retval NRF_ERROR_INVALID_PARAM NULL output pointer for INFO_STRINGS.
 * @retval NRF_ERROR_NOT_SUPPORTED Unrecognized request.
 */
static ret_code_t block_dev_sdc_ioctl(nrf_block_dev_t const * p_blk_dev,
nrf_block_dev_ioctl_req_t req,
void * p_data)
{
    nrf_block_dev_sdc_t const * p_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_sdc_t, block_dev);

    if (req == NRF_BLOCK_DEV_IOCTL_REQ_CACHE_FLUSH)
    {
        /* No write cache on this device - a flush is never in progress. */
        bool * p_flush_in_progress = p_data;
        if (p_flush_in_progress != NULL)
        {
            *p_flush_in_progress = false;
        }
        return NRF_SUCCESS;
    }

    if (req == NRF_BLOCK_DEV_IOCTL_REQ_INFO_STRINGS)
    {
        if (p_data == NULL)
        {
            return NRF_ERROR_INVALID_PARAM;
        }
        nrf_block_dev_info_strings_t const * * pp_info = p_data;
        *pp_info = &p_dev->info_strings;
        return NRF_SUCCESS;
    }

    return NRF_ERROR_NOT_SUPPORTED;
}
/**
 * @brief Returns the device geometry (block count and block size).
 *
 * Geometry is populated by sdc_handler() on SDC_EVT_INIT, so it is only
 * meaningful after successful initialization.
 *
 * @param p_blk_dev Block device handle.
 * @return Pointer to the geometry stored in the device work structure.
 */
static nrf_block_dev_geometry_t const * block_dev_sdc_geometry(nrf_block_dev_t const * p_blk_dev)
{
    ASSERT(p_blk_dev);
    nrf_block_dev_sdc_t const * p_dev =
        CONTAINER_OF(p_blk_dev, nrf_block_dev_sdc_t, block_dev);
    return &p_dev->p_work->geometry;
}
/**
 * @brief Block device operations table for the SDC implementation.
 *
 * Referenced by NRF_BLOCK_DEV_SDC_DEFINE() when instantiating a device.
 */
const nrf_block_dev_ops_t nrf_block_device_sdc_ops = {
.init = block_dev_sdc_init,
.uninit = block_dev_sdc_uninit,
.read_req = block_dev_sdc_read_req,
.write_req = block_dev_sdc_write_req,
.ioctl = block_dev_sdc_ioctl,
.geometry = block_dev_sdc_geometry,
};
/** @} */

/* ==========================================================================
 * nrf_block_dev_sdc.h
 * ========================================================================== */
/**
* Copyright (c) 2016 - 2017, Nordic Semiconductor ASA
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form, except as embedded into a Nordic
* Semiconductor ASA integrated circuit in a product or a software update for
* such product, must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
*
* 3. Neither the name of Nordic Semiconductor ASA nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 4. This software, with or without modification, must only be used with a
* Nordic Semiconductor ASA integrated circuit.
*
* 5. Any software provided in binary form under this license must not be reverse
* engineered, decompiled, modified and/or disassembled.
*
* THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**@file
*
* @defgroup nrf_block_dev_sdc SDC implementation
* @ingroup nrf_block_dev
* @{
*
*/
#ifndef NRF_BLOCK_DEV_SDC_H__
#define NRF_BLOCK_DEV_SDC_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "nrf_block_dev.h"
#include "app_sdcard.h"
/**
* @brief SDC block device operations
* */
extern const nrf_block_dev_ops_t nrf_block_device_sdc_ops;
/**
 * @brief Work structure of SDC block device
 *
 * Mutable per-device state; one instance is created per device by
 * @ref NRF_BLOCK_DEV_SDC_DEFINE.
 */
typedef struct {
nrf_block_dev_geometry_t geometry; //!< Block device geometry, filled in after card initialization
nrf_block_dev_ev_handler ev_handler; //!< Block device event handler; NULL selects synchronous mode
nrf_block_req_t req; //!< Copy of the current block READ/WRITE request
void const * p_context; //!< Context handle passed to event handler
} nrf_block_dev_sdc_work_t;
/**
 * @brief SDC block device config initializer (@ref nrf_block_dev_sdc_config_t)
 *
 * @param blk_size       Block size in bytes; must equal SDC_SECTOR_SIZE or
 *                       initialization returns NRF_ERROR_NOT_SUPPORTED.
 * @param sdc_lib_config SDC library config (@ref app_sdc_config_t)
 * */
#define NRF_BLOCK_DEV_SDC_CONFIG(blk_size, sdc_lib_config) \
{ \
.block_size = (blk_size), \
.sdc_config = sdc_lib_config \
}
/**
 * @brief SDC block device config
 *
 * Initialize with @ref NRF_BLOCK_DEV_SDC_CONFIG.
 */
typedef struct {
uint32_t block_size; //!< Desired block size in bytes
app_sdc_config_t sdc_config; //!< SDC library configuration (pin assignment etc.)
} nrf_block_dev_sdc_config_t;
/**
 * @brief SDC block device
 *
 * Const instance data; mutable state lives in the pointed-to work structure.
 * */
typedef struct {
nrf_block_dev_t block_dev; //!< Block device handle (must be first for CONTAINER_OF)
nrf_block_dev_info_strings_t info_strings; //!< Block device information strings
nrf_block_dev_sdc_config_t sdc_bdev_config; //!< SDC block device config
nrf_block_dev_sdc_work_t * p_work; //!< SDC block device work structure
} nrf_block_dev_sdc_t;
/**
 * @brief Defines a SDC block device.
 *
 * Creates a static work structure and a static const device instance wired
 * to @ref nrf_block_device_sdc_ops.
 *
 * @param name Instance name
 * @param config Configuration @ref nrf_block_dev_sdc_config_t
 * @param info Info strings @ref NRF_BLOCK_DEV_INFO_CONFIG
 * */
#define NRF_BLOCK_DEV_SDC_DEFINE(name, config, info) \
static nrf_block_dev_sdc_work_t CONCAT_2(name, _work); \
static const nrf_block_dev_sdc_t name = { \
.block_dev = { .p_ops = &nrf_block_device_sdc_ops }, \
.info_strings = BRACKET_EXTRACT(info), \
.sdc_bdev_config = config, \
.p_work = &CONCAT_2(name, _work), \
}
/**
 * @brief Returns block device API handle from SDC block device.
 *
 * NOTE(review): despite the "_ops_get" name, this returns the block device
 * handle (whose p_ops member holds the operations), not the ops table.
 *
 * @param[in] p_blk_sdc SDC block device
 * @return Block device handle
 */
static inline nrf_block_dev_t const *
nrf_block_dev_sdc_ops_get(nrf_block_dev_sdc_t const * p_blk_sdc)
{
return &p_blk_sdc->block_dev;
}
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* NRF_BLOCK_DEV_SDC_H__ */