/*******************************************************************************
Copyright (C) Marvell International Ltd. and its affiliates
This software file (the "File") is owned and distributed by Marvell
International Ltd. and/or its affiliates ("Marvell") under the following
alternative licensing terms. Once you have made an election to distribute the
File under one of the following license alternatives, please (i) delete this
introductory statement regarding license alternatives, (ii) delete the two
license alternatives that you have not elected to use and (iii) preserve the
Marvell copyright notice above.
********************************************************************************
Marvell Commercial License Option
If you received this File from Marvell and you have entered into a commercial
license agreement (a "Commercial License") with Marvell, the File is licensed
to you under the terms of the applicable Commercial License.
********************************************************************************
Marvell GPL License Option
If you received this File from Marvell, you may opt to use, redistribute and/or
modify this File in accordance with the terms and conditions of the General
Public License Version 2, June 1991 (the "GPL License"), a copy of which is
available along with the File in the license.txt file or by writing to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
DISCLAIMED. The GPL License provides additional details about this warranty
disclaimer.
********************************************************************************
Marvell BSD License Option
If you received this File from Marvell, you may opt to use, redistribute and/or
modify this File under the following licensing terms.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Marvell nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*******************************************************************************
* tpm_pkt_proc_logic.c
*
* DESCRIPTION:
* Traffic Processor Manager = TPM
*
* DEPENDENCIES:
* None
*
* CREATED BY: OctaviaP
*
* DATE CREATED:
*
* FILE REVISION NUMBER:
* Revision: 1.7
*
*
*******************************************************************************/
#include "tpm_common.h"
#include "tpm_header.h"
/* Local definitions */
extern MV_STATUS mv_cust_set_tcont_state(uint32_t tcont, bool state);
typedef tpm_error_code_t (*tpm_proc_common_int_del_func_t) (uint32_t, uint32_t);
int32_t tpm_proc_virt_uni_trg_port_validation(tpm_trg_port_type_t trg_port);
#define IPV6_MOD(mod_bm) (mod_bm & (TPM_IPV6_UPDATE | TPM_HOPLIM_DEC | TPM_IPV6_DST_SET | TPM_IPV6_SRC_SET))
#define IPV4_MOD(mod_bm) (mod_bm & (TPM_IPV4_UPDATE | TPM_TTL_DEC | TPM_IPV4_DST_SET | TPM_IPV4_SRC_SET))
#define L4_CHECK_MOD(mod_bm) ((mod_bm & TPM_IPV4_DST_SET) || (mod_bm & TPM_IPV4_SRC_SET) || \
(mod_bm & TPM_L4_DST_SET) || (mod_bm & TPM_L4_SRC_SET))
#define PARSE_FLAG_CHECK(val, bit) ((val >> bit) & TPM_PARSE_FLAG_CHECK_TRUE_FALSE)
#define IF_ERROR(ret) \
if (ret != TPM_OK) {\
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " recvd ret_code(%d)\n", ret);\
return(ret);\
}
#define IF_ERROR_I(ret, i) \
if (ret != TPM_OK) {\
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " recvd ret_code(%d), ind(%d)\n", ret, i);\
return(ret);\
}
#define MULTI_IP_2_MAC(macAdd, multiIp) do { \
	((uint8_t *)(macAdd))[0] = (uint8_t) 0x01; \
	((uint8_t *)(macAdd))[1] = (uint8_t) 0x00; \
	((uint8_t *)(macAdd))[2] = (uint8_t) 0x5e; \
	((uint8_t *)(macAdd))[3] = (uint8_t) (multiIp)[1]; \
	((uint8_t *)(macAdd))[4] = (uint8_t) (multiIp)[2]; \
	((uint8_t *)(macAdd))[5] = (uint8_t) (multiIp)[3]; \
} while (0)
#define MULTI_IPV6_2_MAC(macAdd, multiIp) do { \
	((uint8_t *)(macAdd))[0] = (uint8_t) 0x33; \
	((uint8_t *)(macAdd))[1] = (uint8_t) 0x33; \
	((uint8_t *)(macAdd))[2] = (uint8_t) (multiIp)[12]; \
	((uint8_t *)(macAdd))[3] = (uint8_t) (multiIp)[13]; \
	((uint8_t *)(macAdd))[4] = (uint8_t) (multiIp)[14]; \
	((uint8_t *)(macAdd))[5] = (uint8_t) (multiIp)[15]; \
} while (0)
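/* Illustrative sketch (not called anywhere in this file): mapping an IPv4
 * multicast group address to its L2 multicast MAC with the macro above.
 * Note the macro copies the low three octets of the group address verbatim
 * behind the 01:00:5e prefix (it does not mask the top bit of the second octet).
 *
 *	uint8_t grp_ip[4] = {224, 1, 2, 3};	// 224.1.2.3
 *	uint8_t grp_mac[6];
 *	MULTI_IP_2_MAC(grp_mac, grp_ip);	// grp_mac = 01:00:5e:01:02:03
 */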
#define NO_FREE_ENTRIES() \
if (free_entries == 0) {\
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "No free entries\n");\
return(TPM_FAIL);\
}
/* Local variables */
spinlock_t tpm_proc_api_call_lock;
static tpm_cpu_loopback_t gs_cpu_loopback[TPM_MAX_CPU_LOOPBACK_NUM];
static uint32_t gn_cpu_lpbk_entry_num = 0;
static uint8_t tpm_igmp_gen_query_mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
static uint8_t tpm_mld_gen_query_mac[6] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01};
/* Bitmap of PNC port_ids */
static uint32_t gmac_pnc_bm[3] = { TPM_BM_GMAC_0, TPM_BM_GMAC_1, TPM_BM_PMAC};
tpm_hot_swap_acl_recovery_t tpm_hot_swap_acl_recovery[] = {
{TPM_API_MAC_LEARN, tpm_acl_rcvr_func_mac_learn},
{TPM_API_DS_LOAD_BALANCE, tpm_acl_rcvr_func_ds_load_balance},
{TPM_API_CPU_LOOPBACK, tpm_acl_rcvr_func_cpu_loopback},
{TPM_API_L2_PRIM, tpm_acl_rcvr_func_l2_prim},
{TPM_API_L3_TYPE, tpm_acl_rcvr_func_l3_type},
{TPM_API_IPV4, tpm_acl_rcvr_func_ipv4},
{TPM_API_IPV4_MC, tpm_acl_rcvr_func_ipv4_mc},
{TPM_API_IPV6_GEN, tpm_acl_rcvr_func_ipv6_gen},
{TPM_API_IPV6_DIP, tpm_acl_rcvr_func_ipv6_dip},
{TPM_API_IPV6_MC, tpm_acl_rcvr_func_ipv6_mc},
{TPM_API_IPV6_NH, tpm_acl_rcvr_func_ipv6_nh},
{TPM_API_IPV6_L4, tpm_acl_rcvr_func_ipv6_l4},
{TPM_API_CNM, tpm_acl_rcvr_func_cnm},
};
static tpm_api_sup_param_val_t api_sup_param_val[] = {
/* tpm_pnc_api_num Supported parse field bits
Supported parse flag bits
Forbidden actions */
{TPM_ADD_DS_LOAD_BALANCE_RULE, TPM_DS_LOAD_BALNC_PARSE_BM_MASK,
(TPM_PARSE_FLAG_TAG1_MASK | TPM_PARSE_FLAG_TAG2_MASK),
(0)},
{TPM_ADD_L2_PRIM_ACL_RULE, TPM_L2_PARSE_BM_MASK,
(TPM_PARSE_FLAG_TAG1_MASK | TPM_PARSE_FLAG_TAG2_MASK),
(0)},
{TPM_ADD_L3_TYPE_ACL_RULE, TPM_L3_PARSE_BM_MASK,
(TPM_PARSE_FLAG_TAG1_MASK | TPM_PARSE_FLAG_TAG2_MASK | TPM_PARSE_FLAG_MTM_MASK | TPM_PARSE_FLAG_TO_CPU_MASK),
(TPM_ACTION_MTM | TPM_ACTION_SPEC_MC_VID)},
{TPM_ADD_IPV4_ACL_RULE, TPM_IPV4_PARSE_BM_MASK,
(TPM_PARSE_FLAG_TAG1_MASK | TPM_PARSE_FLAG_PPPOE_MASK | TPM_PARSE_FLAG_MTM_MASK | TPM_PARSE_FLAG_TO_CPU_MASK),
(TPM_ACTION_MTM | TPM_ACTION_SPEC_MC_VID)},
{TPM_ADD_IPV6_NH_ACL_RULE, 0,
(TPM_PARSE_FLAG_PPPOE_MASK | TPM_PARSE_FLAG_MTM_MASK | TPM_PARSE_FLAG_TO_CPU_MASK),
(TPM_ACTION_MTM | TPM_ACTION_SPEC_MC_VID)},
{TPM_ADD_IPV6_GEN_ACL_RULE, TPM_IPV6_GEN_BM_MASK,
(TPM_PARSE_FLAG_TAG1_MASK | TPM_PARSE_FLAG_PPPOE_MASK | TPM_PARSE_FLAG_MTM_MASK | TPM_PARSE_FLAG_TO_CPU_MASK),
(TPM_ACTION_MTM | TPM_ACTION_SPEC_MC_VID)},
{TPM_ADD_IPV6_DIP_ACL_RULE, TPM_IPv6_PARSE_DIP,
(TPM_PARSE_FLAG_TAG1_MASK | TPM_PARSE_FLAG_PPPOE_MASK | TPM_PARSE_FLAG_MTM_MASK | TPM_PARSE_FLAG_TO_CPU_MASK),
(TPM_ACTION_MTM | TPM_ACTION_SPEC_MC_VID)},
{TPM_ADD_IPV6_L4_PORTS_ACL_RULE, TPM_IPV6_L4_BM_MASK,
(TPM_PARSE_FLAG_L4P_MASK | TPM_PARSE_FLAG_MTM_MASK | TPM_PARSE_FLAG_TO_CPU_MASK),
(TPM_ACTION_MTM | TPM_ACTION_SPEC_MC_VID)},
{TPM_ADD_IPV6_GEN_5T_RULE, TPM_IPV6_GEN_BM_MASK | TPM_IPV6_L4_BM_MASK,
(TPM_PARSE_FLAG_L4P_MASK | TPM_PARSE_FLAG_PPPOE_MASK | TPM_PARSE_FLAG_MTM_MASK | TPM_PARSE_FLAG_TO_CPU_MASK),
(TPM_ACTION_MTM | TPM_ACTION_SPEC_MC_VID)},
{TPM_ADD_IPV6_DIP_5T_RULE, TPM_IPv6_PARSE_DIP | TPM_IPV6_GEN_BM_MASK | TPM_IPV6_L4_BM_MASK,
(TPM_PARSE_FLAG_L4P_MASK | TPM_PARSE_FLAG_PPPOE_MASK | TPM_PARSE_FLAG_MTM_MASK | TPM_PARSE_FLAG_TO_CPU_MASK),
(TPM_ACTION_MTM | TPM_ACTION_SPEC_MC_VID)},
{TPM_ADD_IPV6_L4_PORTS_5T_RULE, TPM_IPV6_L4_BM_MASK,
(TPM_PARSE_FLAG_L4P_MASK | TPM_PARSE_FLAG_PPPOE_MASK | TPM_PARSE_FLAG_MTM_MASK | TPM_PARSE_FLAG_TO_CPU_MASK),
(TPM_ACTION_MTM | TPM_ACTION_SPEC_MC_VID)},
};
/* Function Declarations */
int32_t tpm_proc_calc_cnm_rule_num(tpm_src_port_type_t src_port, uint32_t precedence, uint32_t *rule_num);
int32_t tpm_proc_check_cnm_ipv4_pre_filter_triple_key_rule(tpm_src_port_type_t src_port, uint32_t partner_key_bm, tpm_parse_fields_t ipv4_parse_rule_bm, tpm_ipv4_acl_key_t *ipv4_key);
int32_t tpm_proc_check_cnm_ipv4_pre_filter_double_key_rule(tpm_src_port_type_t src_port, uint32_t partner_key_bm, tpm_parse_fields_t ipv4_parse_rule_bm, tpm_ipv4_acl_key_t *ipv4_key);
int32_t tpm_proc_check_cnm_ipv4_pre_filter_single_key_rule(tpm_src_port_type_t src_port, uint32_t partner_key_bm, tpm_parse_fields_t ipv4_parse_rule_bm, tpm_ipv4_acl_key_t *ipv4_key);
int32_t tpm_proc_add_cnm_ipv4_pre_filter_triple_key_rule(tpm_src_port_type_t src_port, uint32_t key_idx, uint32_t key_pattern, tpm_parse_fields_t ipv4_parse_rule_bm, tpm_ipv4_acl_key_t *ipv4_key);
int32_t tpm_proc_add_cnm_ipv4_pre_filter_double_key_rule(tpm_src_port_type_t src_port, uint32_t key_idx, uint32_t key_pattern, tpm_parse_fields_t ipv4_parse_rule_bm, tpm_ipv4_acl_key_t *ipv4_key);
int32_t tpm_proc_add_cnm_ipv4_pre_filter_single_key_rule(tpm_src_port_type_t src_port, uint32_t key_idx, uint32_t key_pattern, tpm_parse_fields_t ipv4_parse_rule_bm, tpm_ipv4_acl_key_t *ipv4_key);
int32_t tpm_proc_del_cnm_ipv4_pre_filter_rule(uint32_t src_port, uint32_t key_pattern, uint32_t key_idx);
tpm_cnm_ipv4_pre_filter_rule_oper_t gs_cnm_ipv4_pre_filter_rule_oper[TPM_CNM_IPV4_PRE_FILTER_KEY_MAX][TPM_CNM_MAX_IPV4_PRE_FILTER_RULE_PER_KEY] =
{
/* TPM_CNM_IPV4_PRE_FILTER_KEY_1 */
{
{
TPM_CNM_IPV4_PRE_FILTER_KEY_1_MASK|TPM_CNM_IPV4_PRE_FILTER_KEY_2_MASK|TPM_CNM_IPV4_PRE_FILTER_KEY_3_MASK,
tpm_proc_check_cnm_ipv4_pre_filter_triple_key_rule,
tpm_proc_add_cnm_ipv4_pre_filter_triple_key_rule,
tpm_proc_del_cnm_ipv4_pre_filter_rule,
},
{
TPM_CNM_IPV4_PRE_FILTER_KEY_1_MASK|TPM_CNM_IPV4_PRE_FILTER_KEY_2_MASK,
tpm_proc_check_cnm_ipv4_pre_filter_double_key_rule,
tpm_proc_add_cnm_ipv4_pre_filter_double_key_rule,
tpm_proc_del_cnm_ipv4_pre_filter_rule,
},
{
TPM_CNM_IPV4_PRE_FILTER_KEY_1_MASK|TPM_CNM_IPV4_PRE_FILTER_KEY_3_MASK,
tpm_proc_check_cnm_ipv4_pre_filter_double_key_rule,
tpm_proc_add_cnm_ipv4_pre_filter_double_key_rule,
tpm_proc_del_cnm_ipv4_pre_filter_rule,
},
{
TPM_CNM_IPV4_PRE_FILTER_KEY_1_MASK,
tpm_proc_check_cnm_ipv4_pre_filter_single_key_rule,
tpm_proc_add_cnm_ipv4_pre_filter_single_key_rule,
tpm_proc_del_cnm_ipv4_pre_filter_rule,
},
},
/* TPM_CNM_IPV4_PRE_FILTER_KEY_2 */
{
{
TPM_CNM_IPV4_PRE_FILTER_KEY_1_MASK|TPM_CNM_IPV4_PRE_FILTER_KEY_2_MASK|TPM_CNM_IPV4_PRE_FILTER_KEY_3_MASK,
tpm_proc_check_cnm_ipv4_pre_filter_triple_key_rule,
tpm_proc_add_cnm_ipv4_pre_filter_triple_key_rule,
tpm_proc_del_cnm_ipv4_pre_filter_rule,
},
{
TPM_CNM_IPV4_PRE_FILTER_KEY_1_MASK|TPM_CNM_IPV4_PRE_FILTER_KEY_2_MASK,
tpm_proc_check_cnm_ipv4_pre_filter_double_key_rule,
tpm_proc_add_cnm_ipv4_pre_filter_double_key_rule,
tpm_proc_del_cnm_ipv4_pre_filter_rule,
},
{
TPM_CNM_IPV4_PRE_FILTER_KEY_2_MASK|TPM_CNM_IPV4_PRE_FILTER_KEY_3_MASK,
tpm_proc_check_cnm_ipv4_pre_filter_double_key_rule,
tpm_proc_add_cnm_ipv4_pre_filter_double_key_rule,
tpm_proc_del_cnm_ipv4_pre_filter_rule,
},
{
TPM_CNM_IPV4_PRE_FILTER_KEY_2_MASK,
tpm_proc_check_cnm_ipv4_pre_filter_single_key_rule,
tpm_proc_add_cnm_ipv4_pre_filter_single_key_rule,
tpm_proc_del_cnm_ipv4_pre_filter_rule,
},
},
/* TPM_CNM_IPV4_PRE_FILTER_KEY_3 */
{
{
TPM_CNM_IPV4_PRE_FILTER_KEY_1_MASK|TPM_CNM_IPV4_PRE_FILTER_KEY_2_MASK|TPM_CNM_IPV4_PRE_FILTER_KEY_3_MASK,
tpm_proc_check_cnm_ipv4_pre_filter_triple_key_rule,
tpm_proc_add_cnm_ipv4_pre_filter_triple_key_rule,
tpm_proc_del_cnm_ipv4_pre_filter_rule,
},
{
TPM_CNM_IPV4_PRE_FILTER_KEY_1_MASK|TPM_CNM_IPV4_PRE_FILTER_KEY_3_MASK,
tpm_proc_check_cnm_ipv4_pre_filter_double_key_rule,
tpm_proc_add_cnm_ipv4_pre_filter_double_key_rule,
tpm_proc_del_cnm_ipv4_pre_filter_rule,
},
{
TPM_CNM_IPV4_PRE_FILTER_KEY_2_MASK|TPM_CNM_IPV4_PRE_FILTER_KEY_3_MASK,
tpm_proc_check_cnm_ipv4_pre_filter_double_key_rule,
tpm_proc_add_cnm_ipv4_pre_filter_double_key_rule,
tpm_proc_del_cnm_ipv4_pre_filter_rule,
},
{
TPM_CNM_IPV4_PRE_FILTER_KEY_3_MASK,
tpm_proc_check_cnm_ipv4_pre_filter_single_key_rule,
tpm_proc_add_cnm_ipv4_pre_filter_single_key_rule,
tpm_proc_del_cnm_ipv4_pre_filter_rule,
},
},
};
uint32_t sg_l2_cnm_prec_ind[TPM_MAX_NUM_CTC_PRECEDENCE] = {0x0, 0x0, 0x0, 0x1, 0x1, 0x3, 0x3, 0x7};
uint32_t sg_ipv4_cnm_prec_mask[TPM_MAX_NUM_CTC_PRECEDENCE] = {0x0, 0x0, 0x1, 0x1, 0x2, 0x2, 0x4, 0x4};
/* Local Functions */
/* Function Declarations */
tpm_error_code_t tpm_owner_id_check(tpm_api_type_t api_type, uint32_t owner_id);
int32_t tpm_proc_multicast_reset(void);
uint32_t tpm_proc_bc_check(tpm_l2_acl_key_t *l2_key)
{
uint32_t i;
for (i = 0; i < 6; i++) {
if ((l2_key->mac.mac_da[i] & l2_key->mac.mac_da_mask[i]) != 0xFF)
return(TPM_FALSE);
}
return(TPM_TRUE);
}
uint32_t tpm_proc_mc_check(tpm_l2_acl_key_t *l2_key)
{
if ((l2_key->mac.mac_da[0] & l2_key->mac.mac_da_mask[0]) & 0x01)
return(TPM_TRUE);
return(TPM_FALSE);
}
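/* Illustrative sketch: with an l2_key whose mac_da and mac_da_mask bytes are
 * all 0xFF (a fully-masked broadcast DA), tpm_proc_bc_check() returns TPM_TRUE;
 * tpm_proc_mc_check() returns TPM_TRUE whenever the group bit (0x01 in the
 * first DA octet) is set under the mask, e.g. for 01:00:5e:xx:xx:xx keys.
 */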
/*******************************************************************************
* tpm_proc_src_port_gmac_bm_map()
*
* DESCRIPTION: The function correlates a source port to its Rx GMAC(s) by means of the GMAC functionality
*
*
* INPUTS:
* src_port - source port in API format
*
* OUTPUTS:
* gmac_bm - Bitmap of the GMACs relevant to set in TCAM
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
int32_t tpm_proc_src_port_gmac_bm_map(tpm_src_port_type_t src_port,
tpm_gmac_bm_t *gmac_bm)
{
tpm_gmacs_enum_t gmac_i;
tpm_gmac_bm_t l_gmac_bm = 0;
tpm_db_gmac_func_t gmac_func;
tpm_init_gmac_conn_conf_t gmac_port_conf;
for (gmac_i = TPM_ENUM_GMAC_0; gmac_i <= TPM_MAX_GMAC; gmac_i++) {
if (!tpm_db_gmac_valid(gmac_i))
continue;
tpm_db_gmac_func_get(gmac_i, &gmac_func);
/* WAN possibilities */
if (FROM_WAN(src_port) && GMAC_IS_WAN(gmac_func))
l_gmac_bm |= gmac_pnc_bm[gmac_i];
/* LAN possibilities (Note: can be from either WAN or LAN) */
/* From a specific UNI port, including the UNI_virt port */
if (FROM_SPEC_UNI(src_port) && GMAC_IS_LAN(gmac_func)) {
l_gmac_bm |= gmac_pnc_bm[gmac_i];
//break;
/* Any remaining LAN option (UNI_ANY or WAN_OR_LAN) */
} else if (FROM_LAN(src_port) && (GMAC_IS_UNI_LAN(gmac_func) || GMAC_IS_LAN(gmac_func))) {
if (GMAC_IS_UNI_LAN(gmac_func)) {
if (tpm_db_gmac_conn_conf_get(gmac_i, &gmac_port_conf)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "gmac(%d) connection info get fail\n", gmac_i);
return(TPM_FAIL);
}
if (src_port == gmac_port_conf.port_src) {
l_gmac_bm |= gmac_pnc_bm[gmac_i];
break;
} else if (src_port == TPM_SRC_PORT_UNI_ANY) {
l_gmac_bm |= gmac_pnc_bm[gmac_i];
}
} else {
l_gmac_bm |= gmac_pnc_bm[gmac_i];
}
}
}
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, "gmac_bm(0x%x)\n", l_gmac_bm);
*gmac_bm = l_gmac_bm;
return(TPM_OK);
}
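/* Usage sketch (illustrative; the resulting bitmap depends on the profile's
 * GMAC functionality configuration):
 *
 *	tpm_gmac_bm_t bm;
 *	int32_t rc = tpm_proc_src_port_gmac_bm_map(TPM_SRC_PORT_WAN, &bm);
 *	// on TPM_OK, bm holds the TPM_BM_GMAC_0/TPM_BM_GMAC_1/TPM_BM_PMAC bits
 *	// of every GMAC that can receive traffic from the WAN.
 */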
void tpm_proc_set_int_structs(tpm_pncl_pnc_full_t *pnc_data, tpm_pncl_offset_t *start_offset,
tpm_rule_entry_t *api_data, tpm_db_pnc_conn_t *pnc_conn,
tpm_db_pnc_range_t *range_data)
{
/* Set Structs to zero */
memset(pnc_data, 0, sizeof(tpm_pncl_pnc_full_t));
memset(start_offset, 0, sizeof(tpm_pncl_offset_t));
memset(api_data, 0, sizeof(tpm_rule_entry_t));
memset(pnc_conn, 0, sizeof(tpm_db_pnc_conn_t));
memset(range_data, 0, sizeof(tpm_db_pnc_range_t));
return;
}
int32_t tpm_proc_create_acl_pnc_entry(tpm_api_sections_t api_section, uint32_t rule_num,
tpm_pncl_pnc_full_t *pnc_data, uint32_t *pnc_entry, uint32_t *api_rng_entries)
{
tpm_db_pnc_range_t range_data;
uint32_t pnc_range_id;
uint32_t pnc_range_start, api_start, pnc_stop_entry, l_api_rng_entries;
int32_t int_ret_code;
/* Get pnc_range_id */
int_ret_code = tpm_db_api_section_main_pnc_get(api_section, &pnc_range_id);
IF_ERROR(int_ret_code);
/*** Calculate PNC Entry ***/
/* Get PNC Range Start */
int_ret_code = tpm_db_pnc_rng_get(pnc_range_id, &range_data);
IF_ERROR(int_ret_code);
pnc_range_start = range_data.pnc_range_conf.range_start;
api_start = range_data.pnc_range_conf.api_start;
/* Get number of existing api entries */
int_ret_code = tpm_db_api_section_num_entries_get(api_section, &l_api_rng_entries);
IF_ERROR(int_ret_code);
/* Calculate absolute PNC entry number to execute */
*pnc_entry = (pnc_range_start + api_start) + rule_num;
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, " rule_num(%d), l_api_rng_entries(%d)\n", rule_num, l_api_rng_entries);
/* Call PNC Entry Insert, if this is not the api_section's new last entry */
if (rule_num < l_api_rng_entries) {
pnc_stop_entry = (pnc_range_start + api_start) + (l_api_rng_entries - 1);
int_ret_code = tpm_pncl_entry_insert(*pnc_entry, pnc_stop_entry, pnc_data);
IF_ERROR(int_ret_code);
} else { /* Otherwise just set the entry (no insertion) */
int_ret_code = tpm_pncl_entry_set(*pnc_entry, pnc_data);
IF_ERROR(int_ret_code);
}
/* Decrease number of free entries in pnc_range */
int_ret_code = tpm_db_pnc_rng_free_ent_dec(pnc_range_id);
IF_ERROR(int_ret_code);
*api_rng_entries = l_api_rng_entries;
return(TPM_OK);
}
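/* Worked example of the entry calculation above, with hypothetical numbers:
 * range_start = 100, api_start = 4 and l_api_rng_entries = 5 place the existing
 * API entries at PNC 104..108; adding rule_num = 2 targets PNC entry
 * 100 + 4 + 2 = 106, and since 2 < 5 the range is first opened up via
 * tpm_pncl_entry_insert(106, 108, ...) before the new entry lands at 106.
 */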
int32_t tpm_proc_create_table_pnc_entry(tpm_api_sections_t api_section, uint32_t rule_num, uint32_t update_sram,
tpm_pncl_pnc_full_t *pnc_data, uint32_t *pnc_entry,
uint32_t *api_rng_entries)
{
tpm_db_pnc_range_t range_data;
uint32_t pnc_range_id;
uint32_t pnc_range_start, api_start, l_api_rng_entries;
int32_t int_ret_code;
/* Get pnc_range_id */
int_ret_code = tpm_db_api_section_main_pnc_get(api_section, &pnc_range_id);
IF_ERROR(int_ret_code);
/*** Calculate PNC Entry ***/
/* Get PNC Range Start */
int_ret_code = tpm_db_pnc_rng_get(pnc_range_id, &range_data);
IF_ERROR(int_ret_code);
/* check the rule_number */
if (rule_num > range_data.pnc_range_conf.api_end) {
/* rule_number out of range */
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " rule_num(%d) out of range, range api_end(%d)\n",
rule_num, range_data.pnc_range_conf.api_end);
return(TPM_FAIL);
}
pnc_range_start = range_data.pnc_range_conf.range_start;
api_start = range_data.pnc_range_conf.api_start;
/* Get number of existing api entries */
int_ret_code = tpm_db_api_section_num_entries_get(api_section, &l_api_rng_entries);
IF_ERROR(int_ret_code);
/* Calculate absolute PNC entry number to execute */
*pnc_entry = (pnc_range_start + api_start) + rule_num;
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, " rule_num(%d), l_api_rng_entries(%d)\n", rule_num, l_api_rng_entries);
if (update_sram) {
/* Just update sram */
int_ret_code = tpm_pncl_update_sram(*pnc_entry, pnc_data);
IF_ERROR(int_ret_code);
} else {
/* Just set the entry (no insertion) */
int_ret_code = tpm_pncl_entry_set(*pnc_entry, pnc_data);
IF_ERROR(int_ret_code);
/* Decrease number of free entries in pnc_range */
int_ret_code = tpm_db_pnc_rng_free_ent_dec(pnc_range_id);
IF_ERROR(int_ret_code);
}
*api_rng_entries = l_api_rng_entries;
return(TPM_OK);
}
/*******************************************************************************
* tpm_proc_trg_port_gmac_map()
*
* DESCRIPTION: The function correlates a target port to the GMAC it is transmitted through, by means of the GMAC functionality
*
*
* INPUTS:
* trg_port - target port in API format
*
* OUTPUTS:
* gmac_port - Target GMAC
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
int32_t tpm_proc_trg_port_gmac_map(tpm_trg_port_type_t trg_port, tpm_gmacs_enum_t *gmac_port)
{
tpm_gmacs_enum_t gmac_i;
tpm_db_gmac_func_t gmac_func;
tpm_init_gmac_conn_conf_t gmac_port_conf;
if (gmac_port == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid pointer-NULL \n");
return(ERR_GENERAL);
}
(*gmac_port) = -1;
for (gmac_i = TPM_ENUM_GMAC_0; gmac_i <= TPM_MAX_GMAC; gmac_i++) {
if (!tpm_db_gmac_valid(gmac_i))
continue;
tpm_db_gmac_func_get(gmac_i, &gmac_func);
if (tpm_db_gmac_conn_conf_get(gmac_i, &gmac_port_conf)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "gmac(%d) connection info get fail\n", gmac_i);
return(TPM_FAIL);
}
/* TRG WAN */
if ((TRG_WAN(trg_port)) && GMAC_IS_WAN(gmac_func)) {
(*gmac_port) = gmac_i;
break;
/* TRG GMAC_UNI, such as MC lpk, dual gmac uni */
} else if (TRG_UNI(trg_port) && (GMAC_IS_UNI_LAN(gmac_func) || GMAC_IS_DS_UNI_LAN(gmac_func))) {
if ((gmac_port_conf.port_src != TPM_SRC_PORT_ILLEGAL) &&
((trg_port == (1 << (gmac_port_conf.port_src + TPM_TRG_UNI_OFFSET))) ||
(trg_port == TPM_TRG_PORT_UNI_ANY))) {
(*gmac_port) = gmac_i;
break;
}
/* TRG UNI, such as KW2 */
} else if (TRG_UNI(trg_port) && GMAC_IS_LAN(gmac_func)) {
(*gmac_port) = gmac_i;
break;
}
}
return(TPM_OK);
}
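/* Usage sketch (illustrative): for a GMAC configured as UNI LAN whose connected
 * source port is TPM_SRC_PORT_UNI_0, a target of TPM_TRG_UNI_0 matches the
 * (1 << (port_src + TPM_TRG_UNI_OFFSET)) comparison above (assuming the encoding
 * TPM_TRG_UNI_n == 1 << (TPM_SRC_PORT_UNI_n + TPM_TRG_UNI_OFFSET)), so that
 * GMAC is returned:
 *
 *	tpm_gmacs_enum_t gmac;
 *	int32_t rc = tpm_proc_trg_port_gmac_map(TPM_TRG_UNI_0, &gmac);
 */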
uint32_t tpm_proc_gmac1_phy_src_port(tpm_src_port_type_t src_port)
{
tpm_db_gmac_func_t gmac_func;
if (src_port == TPM_SRC_PORT_UNI_VIRT) {
tpm_db_gmac_func_get(TPM_ENUM_GMAC_1, &gmac_func);
if (gmac_func != TPM_GMAC_FUNC_VIRT_UNI)
return(TPM_TRUE);
}
return(TPM_FALSE);
}
/*******************************************************************************
* tpm_proc_all_gmac_bm()
*
* DESCRIPTION: Gets a TCAM port bitmap of all active GMACs
*
* INPUTS:
*
* RETURNS:
* Returns a bitmap of all the active GMACs
*
* COMMENTS: Performs only DB integrity checks, not data correctness checks
*
*******************************************************************************/
uint32_t tpm_proc_all_gmac_bm(void)
{
tpm_gmacs_enum_t gmac_i;
tpm_gmac_bm_t l_gmac_bm = 0;
tpm_db_gmac_func_t gmac_func;
for (gmac_i = TPM_ENUM_GMAC_0; gmac_i <= TPM_MAX_GMAC; gmac_i++) {
if (!tpm_db_gmac_valid(gmac_i))
continue;
tpm_db_gmac_func_get(gmac_i, &gmac_func);
if (gmac_func != TPM_GMAC_FUNC_NONE)
l_gmac_bm |= gmac_pnc_bm[gmac_i];
}
return(l_gmac_bm);
}
int32_t tpm_proc_delete_mod(tpm_mod_owner_t owner, tpm_gmacs_enum_t gmac_port, uint32_t mod_entry)
{
tpm_gmacs_enum_t duplicate_gmac;
tpm_db_ds_mac_based_trunk_enable_t ds_mac_based_trunk_enable;
int32_t ret_code;
ret_code = tpm_mod2_entry_del(owner, gmac_port, mod_entry);
IF_ERROR(ret_code);
/* when ds load balance on G0 and G1 is enabled, need to duplicate DS PMT on G0/1 */
tpm_db_ds_mac_based_trunk_enable_get(&ds_mac_based_trunk_enable);
if ( (TPM_DS_MAC_BASED_TRUNK_DISABLED == ds_mac_based_trunk_enable)
|| ((gmac_port != TPM_ENUM_GMAC_0) && (gmac_port != TPM_ENUM_GMAC_1))) {
/* if this is US or DS_MAC_BASED_TRUNK is DISABLED, do nothing */
return(TPM_OK);
}
if (gmac_port == TPM_ENUM_GMAC_0)
duplicate_gmac = TPM_ENUM_GMAC_1;
else
duplicate_gmac = TPM_ENUM_GMAC_0;
ret_code = tpm_mod2_entry_del(owner, duplicate_gmac, mod_entry);
IF_ERROR(ret_code);
return(TPM_OK);
}
int32_t tpm_proc_create_mod(tpm_pkt_action_t pkt_act, tpm_trg_port_type_t trg_port, tpm_pkt_mod_t *pkt_mod,
tpm_pkt_mod_bm_t pkt_mod_bm, tpm_pkt_mod_int_bm_t int_mod_bm, uint32_t *mod_entry,
uint32_t *trg_gmac)
{
int32_t ret_code;
tpm_gmacs_enum_t duplicate_gmac;
tpm_db_ds_mac_based_trunk_enable_t ds_mac_based_trunk_enable;
if (SET_MOD(pkt_act)) {
/* Currently supporting Vlan operation only */
/* Get dest. gmac */
if (TPM_TRG_LOAD_BAL & trg_port) {
/* DS load balance, set trg port to G1 */
*trg_gmac = TPM_ENUM_GMAC_1;
} else {
tpm_proc_trg_port_gmac_map(trg_port, trg_gmac);
if (*trg_gmac == -1) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "pkt modification not possible on this target gmac(%d) \n",
*trg_gmac);
return(ERR_ACTION_INVALID);
}
}
ret_code = tpm_mod2_entry_set(TPM_MOD_OWNER_TPM, *trg_gmac, pkt_mod_bm, int_mod_bm, pkt_mod, mod_entry);
IF_ERROR(ret_code);
/* when ds load balance on G0 and G1 is enabled, need to duplicate DS PMT on G0/1 */
tpm_db_ds_mac_based_trunk_enable_get(&ds_mac_based_trunk_enable);
if ( (TPM_DS_MAC_BASED_TRUNK_DISABLED == ds_mac_based_trunk_enable)
|| (TRG_WAN(trg_port))) {
/* if this is US or DS_MAC_BASED_TRUNK is DISABLED, do nothing */
return(TPM_OK);
}
if (*trg_gmac == TPM_ENUM_GMAC_0)
duplicate_gmac = TPM_ENUM_GMAC_1;
else if (*trg_gmac == TPM_ENUM_GMAC_1)
duplicate_gmac = TPM_ENUM_GMAC_0;
else {
TPM_OS_INFO(TPM_TPM_LOG_MOD, "target gmac(%d) invalid\n", *trg_gmac);
return(TPM_OK);
}
ret_code = tpm_mod2_entry_set(TPM_MOD_OWNER_TPM, duplicate_gmac,
pkt_mod_bm, int_mod_bm, pkt_mod, mod_entry);
IF_ERROR(ret_code);
}
return(TPM_OK);
}
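/* Example of the duplication above (illustrative): with DS MAC-based trunking
 * enabled and a downstream target resolved to *trg_gmac == TPM_ENUM_GMAC_0,
 * the same modification entry is also written to TPM_ENUM_GMAC_1 via a second
 * tpm_mod2_entry_set() call, so whichever GMAC the trunk selects can transmit
 * the modified packet; tpm_proc_delete_mod() mirrors this on deletion.
 */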
void tpm_proc_set_api_moddata(tpm_pkt_action_t pkt_act, uint32_t trg_gmac, tpm_db_mod_conn_t *mod_con,
uint32_t mod_entry)
{
/* Set modification data in API data */
if (SET_MOD(pkt_act)) {
mod_con->mod_cmd_ind = mod_entry;
mod_con->mod_cmd_mac = trg_gmac;
} else {
mod_con->mod_cmd_ind = 0;
mod_con->mod_cmd_mac = TPM_INVALID_GMAC;
}
return;
}
int32_t tpm_proc_check_missing_data(tpm_rule_action_t *rule_action,
tpm_pkt_mod_t *pkt_mod,
tpm_pkt_frwd_t *pkt_frwd,
void *parsing_key,
tpm_pkt_action_t pkt_act,
tpm_parse_fields_t parse_rule_bm)
{
/* Check keys exist for parse fields */
if (rule_action == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "rule_action cannot be NULL\n");
return(ERR_ACTION_INVALID);
}
if ((pkt_mod == NULL) && (SET_MOD(pkt_act))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Packet Mod requested with NULL pointer\n");
return(ERR_MOD_INVALID);
}
if ((pkt_frwd == NULL) && ((SET_TARGET_PORT(pkt_act)) || SET_TARGET_QUEUE(pkt_act))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Target set requested with NULL pointer\n");
return(ERR_FRWD_INVALID);
}
if ((parsing_key == NULL) && (parse_rule_bm != 0)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Parsing requested with NULL pointer\n");
return(ERR_FRWD_INVALID);
}
return(TPM_OK);
}
int32_t tpm_proc_check_valid_target(tpm_dir_t dir,
tpm_db_pon_type_t pon_type,
tpm_src_port_type_t src_port,
tpm_trg_port_type_t trg_port,
uint8_t trg_queue,
tpm_pkt_action_t pkt_act,
uint8_t ds_load_bal_valid)
{
tpm_init_virt_uni_t virt_uni_info;
int32_t ret_code;
uint32_t rx_queue_valid, rx_queue_size;
tpm_gmac_bm_t gmac_bm;
tpm_gmacs_enum_t gmac;
uint32_t tx_queue_valid, tx_queue_size;
tpm_db_txq_owner_t tx_owner;
tpm_db_tx_mod_t tx_port;
tpm_gmacs_enum_t act_wan = tpm_db_active_wan_get();
/* Check Valid Target */
if (SET_TARGET_PORT(pkt_act)) {
if (TO_PON(dir, trg_port, pon_type, act_wan) ||
TO_ETHWAN(dir, trg_port, act_wan) || TO_CPU(trg_port)) {
/* PON CPU is OK */
} else if (TO_LAN(dir, trg_port)){
/* check target uni port valid or not */
ret_code = tpm_proc_check_dst_uni_port(trg_port);
IF_ERROR(ret_code);
/* check ds load balance trg */
if (trg_port & TPM_TRG_LOAD_BAL) {
if (!ds_load_bal_valid) {
/* TPM_TRG_LOAD_BAL should not be set */
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "TPM_TRG_LOAD_BAL should not be set\n");
return(ERR_FRWD_INVALID);
}
if (!tpm_db_ds_load_bal_en_get()) {
/* profile does not support TPM_TRG_LOAD_BAL */
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "profile does not support TPM_TRG_LOAD_BAL\n");
return(ERR_FRWD_INVALID);
}
}
} else {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "* dir=%d, trg_port=%d, pon_type=%d *\r\n", dir, trg_port,
pon_type);
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Illegal Target Port\n");
return(ERR_FRWD_INVALID);
}
}
/* check valid target when virt uni via GMAC1 feature is enabled */
/* not all target port combinations are supported when WiFi via GMAC1 = UNI_4 feature is enabled */
ret_code = tpm_db_virt_info_get(&virt_uni_info);
if (ret_code != TPM_DB_OK) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " virt uni recvd ret_code(%d)\n", ret_code);
return(ERR_FRWD_INVALID);
}
if ((virt_uni_info.enabled == 1) && (dir == TPM_DIR_DS) && (SET_TARGET_PORT(pkt_act))) {
/* virt uni feature is enabled - validate and recalculate the mh_reg */
ret_code = tpm_proc_virt_uni_trg_port_validation(trg_port);
if (ret_code != TPM_OK) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "* dir=%d, trg_port=%d, pon_type=%d *\r\n", dir, trg_port,
pon_type);
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Illegal Virt UNI Target Port.\n");
return(ERR_FRWD_INVALID);
}
}
/* Check Valid Queue */
tpm_proc_src_port_gmac_bm_map(src_port, &gmac_bm);
/* TODO - Check Queue depending on actual queues in target or in Rx */
if (SET_TARGET_QUEUE(pkt_act) && (trg_queue >= TPM_MAX_NUM_TX_QUEUE)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Target Queue Out of Range\n");
return(ERR_FRWD_INVALID);
}
/* Check Rx queue valid */
if (SET_TARGET_PORT(pkt_act) && TO_CPU(trg_port) && SET_TARGET_QUEUE(pkt_act)) {
for (gmac = TPM_ENUM_GMAC_0; gmac < TPM_MAX_NUM_GMACS; gmac++) {
if (((gmac_bm & TPM_BM_GMAC_0) && (gmac == TPM_ENUM_GMAC_0)) ||
((gmac_bm & TPM_BM_GMAC_1) && (gmac == TPM_ENUM_GMAC_1)) ||
((gmac_bm & TPM_BM_PMAC) && (gmac == TPM_ENUM_PMAC))) {
/* Get Rx queue info */
ret_code = tpm_db_gmac_rx_q_conf_get(gmac, trg_queue, &rx_queue_valid, &rx_queue_size);
if (ret_code != TPM_DB_OK) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " rx queue recvd ret_code(%d)\n", ret_code);
return(ERR_FRWD_INVALID);
}
/* Check queue valid state */
if (TPM_FALSE == rx_queue_valid) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Target Queue Invalid\n");
return(ERR_FRWD_INVALID);
}
/* Check queue size */
if (0 == rx_queue_size) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Target Queue Size is Zero\n");
return(ERR_FRWD_INVALID);
}
}
}
}
/* Check Tx queue valid */
if (SET_TARGET_PORT(pkt_act) && (!TO_CPU(trg_port)) && SET_TARGET_QUEUE(pkt_act)) {
tpm_proc_trg_port_gmac_map(trg_port, &gmac);
for (tx_port = TPM_TX_MOD_GMAC0; tx_port < TPM_MAX_NUM_TX_PORTS; tx_port++) {
if (((trg_port & TPM_TRG_TCONT_0) && (act_wan == TPM_ENUM_PMAC) && (tx_port == TPM_TX_MOD_PMAC_0)) ||
((trg_port & TPM_TRG_TCONT_0) && (act_wan == TPM_ENUM_GMAC_0) && (tx_port == TPM_TX_MOD_GMAC0)) ||
((trg_port & TPM_TRG_TCONT_0) && (act_wan == TPM_ENUM_GMAC_1) && (tx_port == TPM_TX_MOD_GMAC1)) ||
((trg_port & TPM_TRG_TCONT_1) && (tx_port == TPM_TX_MOD_PMAC_1)) ||
((trg_port & TPM_TRG_TCONT_2) && (tx_port == TPM_TX_MOD_PMAC_2)) ||
((trg_port & TPM_TRG_TCONT_3) && (tx_port == TPM_TX_MOD_PMAC_3)) ||
((trg_port & TPM_TRG_TCONT_4) && (tx_port == TPM_TX_MOD_PMAC_4)) ||
((trg_port & TPM_TRG_TCONT_5) && (tx_port == TPM_TX_MOD_PMAC_5)) ||
((trg_port & TPM_TRG_TCONT_6) && (tx_port == TPM_TX_MOD_PMAC_6)) ||
((trg_port & TPM_TRG_TCONT_7) && (tx_port == TPM_TX_MOD_PMAC_7)) ||
((trg_port & (TPM_TRG_UNI_0 |
TPM_TRG_UNI_1 |
TPM_TRG_UNI_2 |
TPM_TRG_UNI_3 |
TPM_TRG_UNI_4 |
TPM_TRG_UNI_5 |
TPM_TRG_UNI_6 |
TPM_TRG_UNI_7 |
TPM_TRG_UNI_VIRT |
TPM_TRG_PORT_UNI_ANY)) && (tx_port == (tpm_db_tx_mod_t)gmac))) {
/* Get Tx queue info */
ret_code = tpm_db_gmac_tx_q_conf_get(tx_port,
trg_queue,
&tx_queue_valid,
NULL,
&tx_owner,
NULL,
&tx_queue_size,
NULL);
if (ret_code != TPM_DB_OK) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " tx queue recvd ret_code(%d)\n", ret_code);
return(ERR_FRWD_INVALID);
}
/* Check queue valid state */
if (TPM_FALSE == tx_queue_valid) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Target Queue Invalid\n");
return(ERR_FRWD_INVALID);
}
/* Check queue owner */
if (((gmac_bm & TPM_BM_GMAC_0) && (tx_owner != TPM_Q_OWNER_GMAC0)) ||
((gmac_bm & TPM_BM_GMAC_1) && (tx_owner != TPM_Q_OWNER_GMAC1)) ||
((gmac_bm & TPM_BM_PMAC) && (tx_owner != TPM_Q_OWNER_PMAC))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Target Queue Owner Invalid, gmac_bm: [%d], tx_owner: [%d]\n",
gmac_bm, tx_owner);
return(ERR_FRWD_INVALID);
}
/* check queue size */
if (0 == tx_queue_size) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Target Queue Size is Zero\n");
return(ERR_FRWD_INVALID);
}
}
}
}
return(TPM_OK);
}
int32_t tpm_proc_check_pkt_action(tpm_pkt_action_t pkt_act,
tpm_trg_port_type_t trg_port,
tpm_pkt_mod_t *pkt_mod,
tpm_pkt_mod_bm_t pkt_mod_bm)
{
if (PKT_DROP(pkt_act)) {
if (SET_TARGET_PORT(pkt_act) || SET_TARGET_QUEUE(pkt_act) || SET_MOD(pkt_act) || SET_CPU(pkt_act)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Packet dropped action, no other packet actions are allowed \n");
return(ERR_ACTION_INVALID);
}
}
if (SET_CPU(pkt_act)) {
if ((!SET_TARGET_PORT(pkt_act)) || (trg_port != TPM_TRG_PORT_CPU)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD,"trg_port %x \n", trg_port);
TPM_OS_ERROR(TPM_TPM_LOG_MOD,
"For packet TO_CPU action, target port should be set and equal to CPU port.\n");
return(ERR_ACTION_INVALID);
}
}
if (SET_MOD(pkt_act)) {
/* if split mod, do not check trgt port */
if ((TPM_SPLIT_MOD_ENABLED == tpm_db_split_mod_get_enable())
&& (pkt_mod_bm == TPM_VLAN_MOD)
&& (pkt_mod->vlan_mod.vlan_op == VLANOP_SPLIT_MOD_PBIT)) {
} else if ((!SET_TARGET_PORT(pkt_act)) || (trg_port == TPM_TRG_PORT_CPU)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD,
"For rule SET_MOD action, target port should be set and should not be CPU port. \n");
return(ERR_ACTION_INVALID);
}
}
return(TPM_OK);
}
#if 0 /*Phase1 - no longer has below limitation */
int32_t tpm_proc_pkt_mod_check(tpm_pkt_action_t pkt_act, tpm_pkt_mod_bm_t pkt_mod_bm, tpm_pkt_mod_t *pkt_mod)
{
if (SET_MOD(pkt_act)) {
if (pkt_mod_bm != TPM_VLAN_MOD) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Support only VLAN operations \n");
return(ERR_MOD_INVALID);
}
if (pkt_mod->vlan_mod.vlan_op >= VLANOP_ILLEGAL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Illegal VLAN Operation \n");
return(ERR_MOD_INVALID);
}
}
return(TPM_OK);
}
#endif
/* In the PARSE_FLAG_BM, the TRUE and FALSE bits of each pair (e.g. bit#0 and bit#1)
   must not be set simultaneously; validate each pair of bits in parse_flags_bm. */
int32_t tpm_proc_check_parse_flag_valid(tpm_parse_flags_t parse_flags_bm)
{
int32_t i;
for (i = 0; i < TPM_PARSE_FLAG_CHECK_FIELD_NUMBER; i++) {
if (PARSE_FLAG_CHECK(parse_flags_bm, i * 2) == TPM_PARSE_FLAG_CHECK_TRUE_FALSE)
return(ERR_PARSE_MAP_INVALID);
}
return(TPM_OK);
}
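/* Example (illustrative, assuming TPM_PARSE_FLAG_CHECK_TRUE_FALSE == 0x3):
 * if both the TRUE and FALSE bits of the same pair are set in parse_flags_bm,
 * PARSE_FLAG_CHECK(parse_flags_bm, pair * 2) evaluates to 0x3 and the rule is
 * rejected with ERR_PARSE_MAP_INVALID; setting only one bit of a pair passes.
 */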
/* Get GMAC Lan UNI number and UNI port number */
/*******************************************************************************
* tpm_proc_gmaclanuni_uninum_get()
*
* DESCRIPTION: The function gets the number of GMACs configured as LAN UNI
*              and the maximum number of UNI ports.
*
* INPUTS:
*	None
*
* OUTPUTS:
*	gmac_is_uni_num  - number of GMACs configured as LAN UNI
*	max_uni_port_num - maximum number of UNI ports
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
int32_t tpm_proc_gmaclanuni_uninum_get(uint32_t *gmac_is_uni_num, uint32_t *max_uni_port_num)
{
tpm_db_gmac_func_t gmac_func;
tpm_gmacs_enum_t gmac_i;
uint32_t temp1 = 0, temp2 = 0;
/* Count the GMACs configured as LAN UNI */
for (gmac_i = TPM_ENUM_GMAC_0; gmac_i <= TPM_MAX_GMAC; gmac_i++) {
tpm_db_gmac_func_get(gmac_i, &gmac_func);
if (GMAC_IS_UNI_LAN(gmac_func))
temp1++;
}
/* Get Max UNI port number */
tpm_db_max_uni_port_nr_get(&temp2);
*gmac_is_uni_num = temp1;
*max_uni_port_num = temp2;
return (TPM_OK);
}
/*******************************************************************************
* tpm_proc_src_port_check()
*
* DESCRIPTION: The function checks if the requested source port is legit.
*
* INPUTS:
* src_port - source port in API format
*
* OUTPUTS:
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
int32_t tpm_proc_src_port_check(tpm_src_port_type_t src_port)
{
tpm_db_pon_type_t pon_type;
int32_t ret_code;
tpm_db_chip_conn_t dummy_chip_con;
tpm_db_int_conn_t dummy_int_conn;
uint32_t dummy_switch_port;
tpm_init_virt_uni_t virt_uni_info;
uint32_t gmac_is_uni_num = 0, max_uni_port_num = 0;
/* Check Port exists */
if (src_port == TPM_SRC_PORT_WAN) {
if (TPM_ENUM_PMAC == tpm_db_active_wan_get()){
tpm_db_pon_type_get(&pon_type);
if (pon_type >= TPM_NONE) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "No pon_type defined (0x%x)\n", pon_type);
return(TPM_FAIL);
}
}
} else if (FROM_SPEC_UNI(src_port) && (src_port != TPM_SRC_PORT_UNI_VIRT)) {
/* Check if port exists */
ret_code = tpm_db_eth_port_conf_get(src_port,
&dummy_chip_con,
&dummy_int_conn,
&dummy_switch_port);
if (ret_code != TPM_DB_OK) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Source UNI Port (%d) is not valid port \n", src_port);
return(TPM_FAIL);
}
} else if (src_port == TPM_SRC_PORT_UNI_ANY) {
/* Check UNI_ANY is supported or not */
/* Get GMAC LAN_UNI and UNI ports number */
tpm_proc_gmaclanuni_uninum_get(&gmac_is_uni_num, &max_uni_port_num);
if (gmac_is_uni_num > TPM_SRC_PORT_UNI_1 ||
(gmac_is_uni_num == TPM_SRC_PORT_UNI_1 && max_uni_port_num > TPM_SRC_PORT_UNI_1)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Src port UNI_ANY is not supported\n");
return(ERR_SRC_PORT_INVALID);
}
return(TPM_OK);
} else if (src_port == TPM_SRC_PORT_UNI_VIRT) {
tpm_db_virt_info_get(&virt_uni_info);
if (TPM_VIRT_UNI_DISABLED == virt_uni_info.enabled) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "UNI_VIRT is not enabled\n");
return(ERR_SRC_PORT_INVALID);
}
} else {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Source UNI Port (%d) out of range \n", src_port);
return(TPM_FAIL);
}
return(TPM_OK);
}
/*******************************************************************************
* tpm_proc_src_port_dir_map()
*
* DESCRIPTION: The function maps the source port to the packet direction.
*
* INPUTS:
* src_port - source port in API format
*
* OUTPUTS:
* dir - Upstream or downstream direction
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
int32_t tpm_proc_src_port_dir_map(tpm_src_port_type_t src_port, tpm_dir_t *dir)
{
if (src_port == TPM_SRC_PORT_WAN)
(*dir) = TPM_DIR_DS;
else
(*dir) = TPM_DIR_US;
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, " Source port(%d), direction(%d) \n", src_port, (*dir));
return(TPM_OK);
}
/*******************************************************************************
* tpm_proc_setstage_done()
*
* DESCRIPTION: Marks the PnC lookup chain as done by clearing the next lookup
*              id/offset and setting the lookup-done bit in the SRAM data.
*
* INPUTS:
*	rule_action - rule action (not used by this function)
*	sram_data   - SRAM data structure to update
*
* OUTPUTS:
*	sram_data   - updated in place
*
* RETURNS:
*	void
*
* COMMENTS:
*
*******************************************************************************/
void tpm_proc_setstage_done(tpm_rule_action_t *rule_action, tpm_pncl_sram_data_t *sram_data)
{
sram_data->next_lu_id = 0;
sram_data->next_lu_off_reg = 0;
sram_data->sram_updt_bm |= TPM_PNCL_SET_LUD;
return;
}
/*******************************************************************************
* tpm_proc_set_mod()
*
* DESCRIPTION: If the rule action requests packet modification, sets the
*              modification bit in the SRAM data and stores the modification
*              command index.
*
* INPUTS:
*	rule_action - rule action containing the packet action bitmap
*	mod_cmd     - modification command index
*
* OUTPUTS:
*	sram_data   - updated in place
*
* RETURNS:
*	void
*
* COMMENTS:
*
*******************************************************************************/
void tpm_proc_set_mod(tpm_rule_action_t *rule_action, tpm_pncl_sram_data_t *sram_data, uint32_t mod_cmd)
{
if (SET_MOD(rule_action->pkt_act)) {
sram_data->sram_updt_bm |= TPM_PNCL_SET_MOD;
sram_data->flow_id_sub.mod_cmd = mod_cmd;
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, "Set Modification mod_cmd(%d)\n", mod_cmd);
}
}
/*******************************************************************************
* tpm_proc_set_cust_cpu_packet_parse()
*
* DESCRIPTION: Sets the RX_SPECIAL result-info bit in the SRAM data for packets
*              whose rule action carries the customization (CUST) flag.
*
* INPUTS:
*	rule_action - rule action containing the packet action bitmap
*
* OUTPUTS:
*	sram_data   - updated in place
*
* RETURNS:
*	void
*
* COMMENTS:
*
*******************************************************************************/
void tpm_proc_set_cust_cpu_packet_parse(tpm_rule_action_t *rule_action, tpm_pncl_sram_data_t *sram_data)
{
/* set the RI = TPM_PNCL_SET_RX_SPECIAL for packets with CUSTOMIZE flag */
if (SET_CUST(rule_action->pkt_act)) {
sram_data->sram_updt_bm |= TPM_PNCL_SET_RX_SPECIAL;
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, "Set Customization flag.\n");
}
}
/*******************************************************************************
* tpm_proc_set_trgt_queue()
*
* DESCRIPTION: Translates the rule's target port and queue into the PnC SRAM
*              data (flow-id target, Tx queue, and GEM port/MH register for GPON).
*
* INPUTS:
*	rule_action - rule action containing the packet action bitmap
*	pkt_frwd    - packet forwarding data (target port, target queue, GEM port)
*	dir         - packet direction
*	pon_type    - active PON type
*
* OUTPUTS:
*	sram_data   - updated in place
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
int32_t tpm_proc_set_trgt_queue(tpm_rule_action_t *rule_action,
tpm_pkt_frwd_t *pkt_frwd,
tpm_dir_t dir,
tpm_db_pon_type_t pon_type,
tpm_pncl_sram_data_t *sram_data)
{
uint32_t i;
tpm_db_ds_mac_based_trunk_enable_t ds_mac_based_trunk_enable;
if (SET_TARGET_PORT(rule_action->pkt_act)) {
tpm_gmacs_enum_t act_wan = tpm_db_active_wan_get();
/* Add Target Txp to update BM */
sram_data->sram_updt_bm |= TPM_PNCL_SET_TXP;
/* Set PNC FlowId Target */
if (TO_PON(dir, pkt_frwd->trg_port, pon_type, act_wan)) {
for (i = 0; i < 8; i++) {
if (pkt_frwd->trg_port == (uint32_t) (TPM_TRG_TCONT_0 << i))
break;
}
sram_data->flow_id_sub.pnc_target = TPM_PNC_TRG_PMAC0 + i;
} else if (TO_ETHWAN(dir, pkt_frwd->trg_port, act_wan)) {
switch (act_wan) {
case TPM_ENUM_GMAC_0:
sram_data->flow_id_sub.pnc_target = TPM_PNC_TRG_GMAC0;
break;
case TPM_ENUM_GMAC_1:
sram_data->flow_id_sub.pnc_target = TPM_PNC_TRG_GMAC1;
break;
default:
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Set Target, act_wan(%d) invalid",act_wan);
return(TPM_FAIL);
}
} else if (TO_LAN(dir, pkt_frwd->trg_port)) {
tpm_pnc_trg_t pnc_target;
/* when ds load balance on G0 and G1 is enabled, trgt port can only
* be set in the first range TPM_DS_MAC_BASED_TRUNKING.
*/
tpm_db_ds_mac_based_trunk_enable_get(&ds_mac_based_trunk_enable);
if (TPM_DS_MAC_BASED_TRUNK_ENABLED == ds_mac_based_trunk_enable) {
sram_data->sram_updt_bm &= (~TPM_PNCL_SET_TXP);
} else if (TPM_TRG_LOAD_BAL & pkt_frwd->trg_port) {
/* DS load balance, set trg port to G1 */
pnc_target = TPM_PNC_TRG_GMAC1;
} else if (tpm_db_to_lan_gmac_get(pkt_frwd->trg_port, &pnc_target) != TPM_DB_OK){
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "tpm_db_to_lan_gmac_get failed trg_port 0x%x\n",
pkt_frwd->trg_port);
return(TPM_FAIL);
}
sram_data->flow_id_sub.pnc_target = pnc_target;
} else if (TO_CPU(pkt_frwd->trg_port)) {
sram_data->flow_id_sub.pnc_target = TPM_PNC_TRG_CPU;
} else {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Set Target received, no legitimate Target Found \n");
return(TPM_FAIL);
}
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, "Set Target(%d)\n", sram_data->flow_id_sub.pnc_target);
/* If target is GPON, set GEM and MH */
if (TO_GPON(dir, pkt_frwd->trg_port, pon_type, act_wan)) {
sram_data->mh_reg.mh_set = TPM_TRUE;
sram_data->mh_reg.mh_reg = 1;
sram_data->sram_updt_bm |= TPM_PNCL_SET_GEM;
sram_data->flow_id_sub.gem_port = pkt_frwd->gem_port;
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, "Set GemPort(%d)\n", pkt_frwd->gem_port);
}
}
if (SET_TARGET_QUEUE(rule_action->pkt_act)) {
/* Set Queue */
sram_data->pnc_queue = pkt_frwd->trg_queue;
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, "Set Tx Queue (%x)\n", pkt_frwd->trg_queue);
} else {
sram_data->pnc_queue = TPM_PNCL_NO_QUEUE_UPDATE;
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, "No Tx Queue Updat\n");
}
return(TPM_OK);
}
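/* Example of the T-CONT mapping above (illustrative, assuming
 * TPM_TRG_TCONT_n == TPM_TRG_TCONT_0 << n): a PON-directed rule with
 * pkt_frwd->trg_port == TPM_TRG_TCONT_3 exits the loop at i == 3, so the
 * SRAM flow-id target becomes TPM_PNC_TRG_PMAC0 + 3.
 */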
/*******************************************************************************
* tpm_proc_add_api_ent_check()
*
* DESCRIPTION: The function checks if there is a free_entry to add an API entry
*
* INPUTS:
* section - Section of the API acl/table
* api_rng_type - The Type of the API, ACL or Table
* OUTPUTS:
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
int32_t tpm_proc_add_api_ent_check(tpm_api_sections_t section, tpm_range_type_t api_rng_type, uint32_t rule_num)
{
int32_t ret_code, last_valid;
uint32_t api_rng_size, num_valid_entries, tbl_start;
tpm_pnc_ranges_t prim_pnc_range;
/* TODO - make the api_rng_type part of the api_rng database configuration */
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, " api_sec(%d), api_rng_type(%d) \n", section, api_rng_type);
/* Check API range exists */
ret_code = tpm_db_api_section_get(section, &api_rng_size,
&num_valid_entries, &prim_pnc_range, &last_valid, &tbl_start);
IF_ERROR(ret_code);
/* Check possible to add another entry */
if (num_valid_entries > (api_rng_size-1)) {
/* If the range mode is table, it is unnecessary to check */
if (api_rng_type == TPM_RANGE_TYPE_ACL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " api_sec(%d), has no free entries \n", section);
return(TPM_FAIL);
}
}
/* Check last_valid consistency, for acl type range */
if (api_rng_type == TPM_RANGE_TYPE_ACL) {
if (last_valid != ((int32_t) (num_valid_entries) - (int32_t) (1))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "DB problem, api_section(%d),num_entries(%d),last_valid(%d)\n",
section, num_valid_entries, last_valid);
return(TPM_FAIL);
}
/* make sure inserting this rule will not leave a gap in the api table */
if (rule_num > num_valid_entries) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "inserting this rule would leave a gap in the api table, "
"api_section(%d), num_entries(%d), last_valid(%d), rule_num(%d)\n",
section, num_valid_entries, last_valid, rule_num);
return(TPM_FAIL);
}
}
return(TPM_OK);
}
int32_t tpm_proc_check_api_busy(tpm_api_type_t api_type, uint32_t rule_num)
{
uint32_t db_ret_code;
int32_t rc_code = TPM_RC_OK;
tpm_api_sections_t api_section;
tpm_pnc_ranges_t range_id;
tpm_db_pnc_range_conf_t rangConf;
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, "api_type(%d) rule_num(%d)\n", api_type, rule_num);
/* Get api_section, range_Id, range configuration, to get range type */
db_ret_code = tpm_db_api_section_get_from_api_type(api_type, &api_section);
IF_ERROR(db_ret_code);
db_ret_code = tpm_db_api_section_main_pnc_get(api_section, &range_id);
IF_ERROR(db_ret_code);
db_ret_code = tpm_db_pnc_rng_conf_get(range_id, &rangConf);
IF_ERROR(db_ret_code);
/* Check API Section Busy */
/* In ACL Mode - concurrency not supported.
In TABLE Mode - concurrency supported, but not on the same entry */
spin_lock_bh(&tpm_proc_api_call_lock);
if ((rangConf.range_type == TPM_RANGE_TYPE_ACL) && ((tpm_db_get_api_busy(api_type))== TPM_TRUE))
rc_code = ERR_API_BUSY;
else if ((rangConf.range_type == TPM_RANGE_TYPE_TABLE) &&
((tpm_db_get_api_rule_num_busy(api_type, rule_num)) == TPM_TRUE))
rc_code = ERR_API_BUSY;
else {
db_ret_code = tpm_db_set_api_busy(api_type, rule_num);
if (db_ret_code != TPM_DB_OK)
rc_code = ERR_API_BUSY; /* Table full */
}
spin_unlock_bh(&tpm_proc_api_call_lock);
return(rc_code);
}
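/* Usage sketch (illustrative): a caller would typically bracket an ACL/table
 * update with the busy/done pair, e.g.
 *
 *	int32_t rc = tpm_proc_check_api_busy(TPM_API_MAC_LEARN, 0);
 *	IF_ERROR(rc);
 *	// ... add or delete the rule ...
 *	rc = tpm_proc_api_busy_done(TPM_API_MAC_LEARN, 0);
 *	IF_ERROR(rc);
 */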
int32_t tpm_proc_check_all_api_busy(void)
{
uint32_t db_ret_code;
int32_t rc_code = TPM_RC_OK;
int32_t rule_num;
int32_t rule_num_max;
tpm_api_sections_t api_section;
tpm_pnc_ranges_t range_id;
tpm_db_pnc_range_conf_t rangConf;
tpm_api_type_t api_type;
for (api_type = TPM_API_MAC_LEARN; api_type < TPM_MAX_API_TYPES; api_type++) {
/* Get api_section, range_Id, range configuration, to get range type */
db_ret_code = tpm_db_api_section_get_from_api_type(api_type, &api_section);
IF_ERROR(db_ret_code);
db_ret_code = tpm_db_api_section_main_pnc_get(api_section, &range_id);
if (TPM_DB_OK != db_ret_code)
continue;
db_ret_code = tpm_db_pnc_rng_conf_get(range_id, &rangConf);
IF_ERROR(db_ret_code);
if (rangConf.range_type == TPM_RANGE_TYPE_ACL)
rule_num_max = 1;
else
rule_num_max = TPM_MAX_PARALLEL_API_CALLS;
for (rule_num = 0; rule_num < rule_num_max; rule_num++) {
rc_code = tpm_proc_check_api_busy(api_type, rule_num);
IF_ERROR(rc_code);
}
}
return(TPM_OK);
}
int32_t tpm_proc_api_busy_done(tpm_api_type_t api_type, uint32_t rule_num)
{
int32_t rc_code = TPM_RC_OK;
uint32_t db_ret_code;
spin_lock_bh(&tpm_proc_api_call_lock);
db_ret_code = tpm_db_set_api_free(api_type, rule_num);
if (db_ret_code != TPM_DB_OK) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Error releasing api_busy: api_type(%d) rule_num(%d)\n", api_type, rule_num);
rc_code = ERR_GENERAL;
}
spin_unlock_bh(&tpm_proc_api_call_lock);
return(rc_code);
}
int32_t tpm_proc_all_api_busy_done(void)
{
uint32_t db_ret_code;
int32_t rc_code = TPM_RC_OK;
int32_t rule_num;
int32_t rule_num_max;
tpm_api_sections_t api_section;
tpm_pnc_ranges_t range_id;
tpm_db_pnc_range_conf_t rangConf;
tpm_api_type_t api_type;
for (api_type = TPM_API_MAC_LEARN; api_type < TPM_MAX_API_TYPES; api_type++) {
/* Get api_section, range_Id, range configuration, to get range type */
db_ret_code = tpm_db_api_section_get_from_api_type(api_type, &api_section);
IF_ERROR(db_ret_code);
db_ret_code = tpm_db_api_section_main_pnc_get(api_section, &range_id);
if (TPM_DB_OK != db_ret_code)
continue;
db_ret_code = tpm_db_pnc_rng_conf_get(range_id, &rangConf);
IF_ERROR(db_ret_code);
if (rangConf.range_type == TPM_RANGE_TYPE_ACL)
rule_num_max = 1;
else
rule_num_max = TPM_MAX_PARALLEL_API_CALLS;
for (rule_num = 0; rule_num < rule_num_max; rule_num++) {
rc_code = tpm_proc_api_busy_done(api_type, rule_num);
IF_ERROR(rc_code);
}
}
return(TPM_OK);
}
int32_t tpm_proc_parse_flag_ai_tcam_build(tpm_ai_vectors_t *ai_fields,
uint32_t parse_flags,
long long parse_int_flags,
uint32_t *ai_data,
uint32_t *ai_mask)
{
uint32_t ai_val;
tpm_init_double_tag_t dbl_tag;
tpm_src_port_type_t src_port_tmp;
/* Parsing flags */
/*BIT_0 */
if (parse_flags & TPM_PARSE_FLAG_TAG1_MASK) {
ai_val = ((parse_flags & TPM_PARSE_FLAG_TAG1_TRUE) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_TAG1_BIT_OFF);
*ai_mask |= TPM_AI_TAG1_MASK;
}
if (parse_int_flags & TPM_PARSE_FLAG_NH2_ITER_MASK) {
ai_val = ((parse_int_flags & TPM_PARSE_FLAG_NH2_ITER_TRUE) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_TAG1_BIT_OFF);
*ai_mask |= TPM_AI_TAG1_MASK;
}
if (parse_int_flags & TPM_PARSE_FLAG_IPV6_SUBFLOW_PARSE) {
if (ai_fields == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " ipv6 ai vector cannot be null pointer\n");
return(TPM_FAIL);
}
*ai_data |= (((ai_fields->ipv6_subflow) << TPM_AI_IPV6_SUBFLOW_PART1_BIT_OFF) & TPM_AI_IPV6_SUBFLOW_PART1_MASK);
*ai_mask |= TPM_AI_IPV6_SUBFLOW_PART1_MASK;
}
if (parse_int_flags & TPM_PARSE_FLAG_IPV6_MC_SIP_PARSE) {
if (ai_fields == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " ipv6 ai vector cannot be null pointer\n");
return(TPM_FAIL);
}
*ai_data |= (((ai_fields->ipv6_mc_sip_indx) << TPM_AI_IPV6_MC_SIP_PART1_BIT_OFF) & TPM_AI_IPV6_MC_SIP_PART1_MASK);
*ai_mask |= TPM_AI_IPV6_MC_SIP_PART1_MASK;
}
if (parse_int_flags & TPM_PARSE_FLAG_CNM_IPV4_MASK) {
ai_val = ((parse_int_flags & TPM_PARSE_FLAG_CNM_IPV4_TRUE) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_CNM_IPV4_BIT_OFF);
*ai_mask |= TPM_AI_CNM_IPV4_MASK;
}
/*BIT_1 */
if (parse_flags & TPM_PARSE_FLAG_TAG2_MASK) {
tpm_db_double_tag_support_get(&dbl_tag);
if (TPM_DOUBLE_TAG_ENABLED == dbl_tag) {
ai_val = ((parse_flags & TPM_PARSE_FLAG_TAG2_TRUE) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_TAG2_BIT_OFF);
*ai_mask |= TPM_AI_TAG2_MASK;
} else {
ai_val = ((parse_flags & TPM_PARSE_FLAG_TAG1_TRUE) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_TAG1_BIT_OFF);
*ai_mask |= TPM_AI_TAG1_MASK;
}
}
if (parse_flags & TPM_PARSE_FLAG_PPPOE_MASK) {
ai_val = ((parse_flags & TPM_PARSE_FLAG_PPPOE_TRUE) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_PPPOE_BIT_OFF);
*ai_mask |= TPM_AI_PPPOE_MASK;
}
if (parse_flags & TPM_PARSE_FLAG_L4P_MASK) {
ai_val = ((parse_flags & TPM_PARSE_FLAG_L4_UDP) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_L4P_BIT_OFF);
*ai_mask |= TPM_AI_L4P_MASK;
}
if (parse_int_flags & TPM_PARSE_FLAG_IPV4_PRE_KEY_PARSE) {
if (ai_fields == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " ipv4 pre-filter key ai vector cannot be null pointer\n");
return(TPM_FAIL);
}
*ai_data |= (ai_fields->ipv4_pre_key << TPM_AI_CNM_IPV4_PRE_KEY_BIT_OFF);
*ai_mask |= (ai_fields->ipv4_pre_key << TPM_AI_CNM_IPV4_PRE_KEY_BIT_OFF);
}
if (parse_int_flags & TPM_PARSE_FLAG_CNM_PREC_PARSE) {
if (ai_fields == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " cnm precedence ai vector cannot be null pointer\n");
return(TPM_FAIL);
}
*ai_data |= (ai_fields->cnm_prec << TPM_AI_CNM_PREC_BIT_OFF);
*ai_mask |= (ai_fields->cnm_prec << TPM_AI_CNM_PREC_BIT_OFF);
}
/*BIT_2 */
if (parse_flags & TPM_PARSE_FLAG_MTM_MASK) {
ai_val = ((parse_flags & TPM_PARSE_FLAG_MTM_TRUE) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_MTM_BIT_OFF);
*ai_mask |= TPM_AI_MTM_MASK;
}
if (parse_int_flags & TPM_PARSE_FLAG_IPV6_MC_SIP_PARSE) {
if (ai_fields == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " ipv6 ai vector cannot be null pointer\n");
return(TPM_FAIL);
}
*ai_data |= (((ai_fields->ipv6_mc_sip_indx >> (TPM_AI_IPV6_MC_SIP_PART1_BIT_OFF + 1))
<< TPM_AI_IPV6_MC_SIP_PART2_BIT_OFF) & TPM_AI_IPV6_MC_SIP_PART2_MASK);
*ai_mask |= TPM_AI_IPV6_MC_SIP_PART2_MASK;
}
/*BIT_3 */
if (parse_flags & TPM_PARSE_FLAG_TO_CPU_MASK) {
ai_val = ((parse_flags & TPM_PARSE_FLAG_TO_CPU_TRUE) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_TO_CPU_BIT_OFF);
*ai_mask |= TPM_AI_TO_CPU_MASK;
}
if (parse_int_flags & TPM_PARSE_FLAG_SPLIT_MOD_MASK) {
ai_val = ((parse_int_flags & TPM_PARSE_FLGA_SPLIT_MOD_TRUE) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_SPLIT_MOD_BIT_OFF);
*ai_mask |= TPM_AI_SPLIT_MOD_MASK;
}
if (parse_int_flags & TPM_PARSE_FLAG_DNRT_DS_TRUNK) {
*ai_data &= ~(TPM_AI_DNRT_DS_TRUNK_MASK);
*ai_mask |= TPM_AI_DNRT_DS_TRUNK_MASK;
}
/*BIT_4 */
if (parse_int_flags & TPM_PARSE_FLAG_UNI_PORT_PARSE) {
if (ai_fields == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " src_port ai vector cannot be null pointer\n");
return(TPM_FAIL);
}
if (ai_fields->src_port == TPM_SRC_PORT_UNI_VIRT)
src_port_tmp = TPM_SRC_PORT_UNI_7;
else
src_port_tmp = ai_fields->src_port;
*ai_data |= ((src_port_tmp - TPM_SRC_PORT_UNI_0) << TPM_AI_UNI_BIT_OFF);
*ai_mask |= TPM_AI_UNI_MASK;
}
if (parse_int_flags & TPM_PARSE_FLAG_PPPOE_ADD_MASK) {
ai_val = ((parse_int_flags & TPM_PARSE_FLAG_PPPOE_ADD_TRUE) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_PPPOE_ADD_BIT_OFF);
*ai_mask |= TPM_AI_PPPOE_ADD_MASK;
}
if (parse_int_flags & TPM_PARSE_FLAG_MC_VID_PARSE) {
if (ai_fields == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " vid ai vector cannot be null pointer\n");
return(TPM_FAIL);
}
*ai_data |= ((ai_fields->mc_vid_entry) << TPM_AI_MC_VID_BIT_OFF);
*ai_mask |= TPM_AI_MC_VID_MASK;
}
if (parse_int_flags & TPM_PARSE_FLAG_IPV6_SUBFLOW_PARSE) {
if (ai_fields == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " ipv6 ai vector cannot be null pointer\n");
return(TPM_FAIL);
}
*ai_data |= (((ai_fields->ipv6_subflow >> TPM_AI_IPV6_SUBFLOW_PART1_BIT_SIZE) << TPM_AI_IPV6_SUBFLOW_PART2_BIT_OFF) & TPM_AI_IPV6_SUBFLOW_PART2_MASK);
*ai_mask |= TPM_AI_IPV6_SUBFLOW_PART2_MASK;
}
/*BIT_5 */
if (parse_int_flags & TPM_PARSE_FLAG_NO_PROTO_MASK) {
ai_val = ((parse_int_flags & TPM_PARSE_FLAG_NO_PROTO_TRUE) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_NO_PROTO_BIT_OFF);
*ai_mask |= TPM_AI_NO_PROTO_MASK;
}
/*BIT_6 */
if (parse_int_flags & TPM_PARSE_FLAG_NO_FRAG_MASK) {
ai_val = ((parse_int_flags & TPM_PARSE_FLAG_NO_FRAG_TRUE) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_NO_FRAG_BIT_OFF);
*ai_mask |= TPM_AI_NO_FRAG_MASK;
}
if (parse_int_flags & TPM_PARSE_FLAG_MC_VID_MASK) {
ai_val = ((parse_int_flags & TPM_PARSE_FLAG_MC_VID_TRUE) ? 1 : 0);
*ai_data |= (ai_val << TPM_AI_MC_VID_VALID_BIT_OFF);
*ai_mask |= TPM_AI_MC_VID_VALID_MASK;
}
return(TPM_OK);
}
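/* Usage sketch (illustrative): building the additional-info TCAM fields for a
 * rule that must match on the TAG1 parse flag, with no internal flags set
 * (ai_fields may then be NULL because no vector fields are needed):
 *
 *	uint32_t ai_data = 0, ai_mask = 0;
 *	int32_t rc = tpm_proc_parse_flag_ai_tcam_build(NULL,
 *			TPM_PARSE_FLAG_TAG1_MASK | TPM_PARSE_FLAG_TAG1_TRUE,
 *			0, &ai_data, &ai_mask);
 *	// on TPM_OK, ai_data has bit TPM_AI_TAG1_BIT_OFF set and ai_mask
 *	// includes TPM_AI_TAG1_MASK.
 */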
int32_t tpm_proc_static_ai_sram_build(tpm_ai_vectors_t *ai_fields,
tpm_pkt_action_t pkt_action,
long long int_pkt_action,
uint32_t *ai_data,
uint32_t *ai_mask)
{
tpm_src_port_type_t src_port_tmp;
/* Important Note: All TPM_ACTION_UNSET_XXX must be done first, because other bits may re-raise them */
if (int_pkt_action & TPM_ACTION_UNSET_DNRT) {
*ai_data &= ~(TPM_AI_DNRT_MASK);
*ai_mask |= TPM_AI_DNRT_MASK;
}
if (int_pkt_action & TPM_ACTION_UNSET_DNRT_DS_TRUNK) {
*ai_data &= ~(TPM_AI_DNRT_DS_TRUNK_MASK);
*ai_mask |= TPM_AI_DNRT_DS_TRUNK_MASK;
}
if (int_pkt_action & TPM_ACTION_UNSET_UNI_PORT) {
*ai_data &= ~(TPM_AI_UNI_MASK);
*ai_mask |= TPM_AI_UNI_MASK;
}
if (int_pkt_action & TPM_ACTION_UNSET_IPV6_SUBFLOW) {
*ai_data &= ~TPM_AI_IPV6_SUBFLOW_PART1_MASK;
*ai_mask |= TPM_AI_IPV6_SUBFLOW_PART1_MASK;
*ai_data &= ~TPM_AI_IPV6_SUBFLOW_PART2_MASK;
*ai_mask |= TPM_AI_IPV6_SUBFLOW_PART2_MASK;
}
if (int_pkt_action & TPM_ACTION_UNSET_PPPOE) {
*ai_data &= ~(TPM_AI_PPPOE_MASK);
*ai_mask |= TPM_AI_PPPOE_MASK;
}
if (int_pkt_action & TPM_ACTION_UNSET_NH2_ITER) {
*ai_data &= ~(TPM_AI_NH2_ITER_MASK);
*ai_mask |= TPM_AI_NH2_ITER_MASK;
}
if (int_pkt_action & TPM_ACTION_UNSET_CNM_IPV4) {
*ai_data &= ~(TPM_AI_CNM_IPV4_MASK);
*ai_mask |= TPM_AI_CNM_IPV4_MASK;
}
if (int_pkt_action & TPM_ACTION_UNSET_SPLIT_MOD) {
*ai_data &= ~(TPM_AI_SPLIT_MOD_MASK);
*ai_mask |= TPM_AI_SPLIT_MOD_MASK;
}
if (int_pkt_action & TPM_ACTION_UNSET_IPV4_PRE_KEY) {
*ai_data &= ~TPM_AI_CNM_IPV4_PRE_KEY_MASK;
*ai_mask |= TPM_AI_CNM_IPV4_PRE_KEY_MASK;
}
if (int_pkt_action & TPM_ACTION_UNSET_CNM_PREC) {
*ai_data &= ~TPM_AI_CNM_PREC_MASK;
*ai_mask |= TPM_AI_CNM_PREC_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_TAG1) {
*ai_data |= (1 << TPM_AI_TAG1_BIT_OFF);
*ai_mask |= TPM_AI_TAG1_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_TAG2) {
*ai_data |= (1 << TPM_AI_TAG2_BIT_OFF);
*ai_mask |= TPM_AI_TAG2_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_PPPOE) {
*ai_data |= (1 << TPM_AI_PPPOE_BIT_OFF);
*ai_mask |= TPM_AI_PPPOE_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_L4P_TOG_UDP) {
/* L4P AI bit = 1 => UDP */
*ai_data |= (1 << TPM_AI_L4P_BIT_OFF);
*ai_mask |= TPM_AI_L4P_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_L4P_TOG_TCP) {
/* L4P AI bit = 0 => TCP, only the mask needs to be set */
*ai_data |= (0 << TPM_AI_L4P_BIT_OFF);
*ai_mask |= TPM_AI_L4P_MASK;
}
if (pkt_action & TPM_ACTION_MTM) {
*ai_data |= (1 << TPM_AI_MTM_BIT_OFF);
*ai_mask |= TPM_AI_MTM_MASK;
}
if (pkt_action & TPM_ACTION_TO_CPU) {
*ai_data |= (1 << TPM_AI_TO_CPU_BIT_OFF);
*ai_mask |= TPM_AI_TO_CPU_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_ADD_PPPOE) {
*ai_data |= (1 << TPM_AI_PPPOE_ADD_BIT_OFF);
*ai_mask |= TPM_AI_PPPOE_ADD_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_UNI_PORT) {
if (ai_fields == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " uni_ports ai vector cannot be null pointer\n");
return(TPM_FAIL);
}
if (ai_fields->src_port == TPM_SRC_PORT_UNI_VIRT)
src_port_tmp = TPM_SRC_PORT_UNI_7;
else
src_port_tmp = ai_fields->src_port;
*ai_data |= ((src_port_tmp - TPM_SRC_PORT_UNI_0) << TPM_AI_UNI_BIT_OFF);
*ai_mask |= TPM_AI_UNI_MASK;
}
if (pkt_action & TPM_ACTION_SPEC_MC_VID) {
/* Set mc_valid bit */
*ai_data |= (1 << TPM_AI_MC_VID_VALID_BIT_OFF);
*ai_mask |= TPM_AI_MC_VID_VALID_MASK;
/* Set mc_vid vector */
if (ai_fields == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " vid ai vector cannot be null pointer\n");
return(TPM_FAIL);
}
*ai_data |= ((ai_fields->mc_vid_entry) << TPM_AI_MC_VID_BIT_OFF);
*ai_mask |= TPM_AI_MC_VID_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_NO_PROTO_CHECK) {
*ai_data |= (1 << TPM_AI_NO_PROTO_BIT_OFF);
*ai_mask |= TPM_AI_NO_PROTO_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_NO_FRAG_CHECK) {
*ai_data |= (1 << TPM_AI_NO_FRAG_BIT_OFF);
*ai_mask |= TPM_AI_NO_FRAG_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_NH2_ITER) {
*ai_data |= (1 << TPM_AI_NH2_ITER_BIT_OFF);
*ai_mask |= TPM_AI_NH2_ITER_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_IPV6_SUBFLOW) {
if (ai_fields == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " ipv6 ai vector cannot be null pointer\n");
return(TPM_FAIL);
}
*ai_data |= (((ai_fields->ipv6_subflow) << TPM_AI_IPV6_SUBFLOW_PART1_BIT_OFF) & TPM_AI_IPV6_SUBFLOW_PART1_MASK);
*ai_mask |= TPM_AI_IPV6_SUBFLOW_PART1_MASK;
*ai_data |= (((ai_fields->ipv6_subflow >> TPM_AI_IPV6_SUBFLOW_PART1_BIT_SIZE) << TPM_AI_IPV6_SUBFLOW_PART2_BIT_OFF) & TPM_AI_IPV6_SUBFLOW_PART2_MASK);
*ai_mask |= TPM_AI_IPV6_SUBFLOW_PART2_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_CNM_IPV4) {
*ai_data |= (1 << TPM_AI_CNM_IPV4_BIT_OFF);
*ai_mask |= TPM_AI_CNM_IPV4_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_SPLIT_MOD) {
*ai_data |= (1 << TPM_AI_SPLIT_MOD_BIT_OFF);
*ai_mask |= TPM_AI_SPLIT_MOD_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_IPV4_PRE_KEY) {
if (ai_fields == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " ipv4 pre filter key pattern cannot be null pointer\n");
return(TPM_FAIL);
}
*ai_data |= ((ai_fields->ipv4_pre_key << TPM_AI_CNM_IPV4_PRE_KEY_BIT_OFF) & TPM_AI_CNM_IPV4_PRE_KEY_MASK);
*ai_mask |= TPM_AI_CNM_IPV4_PRE_KEY_MASK;
}
if (int_pkt_action & TPM_ACTION_SET_CNM_PREC) {
if (ai_fields == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " cnm precedence pattern cannot be null pointer\n");
return(TPM_FAIL);
}
*ai_data |= ((ai_fields->cnm_prec << TPM_AI_CNM_PREC_BIT_OFF) & TPM_AI_CNM_PREC_MASK);
*ai_mask |= TPM_AI_CNM_PREC_MASK;
}
return(TPM_OK);
}
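/*
 * Illustrative sketch (kept out of the build): how the SET_UNI_PORT action handled above
 * encodes a UNI source port into the AI field, including the virtual-UNI alias to UNI_7.
 * The variable names are hypothetical; the types, constants and function are the ones
 * defined in this file.
 */
#if 0
{
tpm_ai_vectors_t fields = { 0 };
uint32_t ai_data = 0, ai_mask = 0;
fields.src_port = TPM_SRC_PORT_UNI_VIRT;	/* treated as TPM_SRC_PORT_UNI_7 */
if (tpm_proc_static_ai_sram_build(&fields, 0, TPM_ACTION_SET_UNI_PORT, &ai_data, &ai_mask) == TPM_OK) {
/* ai_data now holds (TPM_SRC_PORT_UNI_7 - TPM_SRC_PORT_UNI_0) at TPM_AI_UNI_BIT_OFF,
* and ai_mask has TPM_AI_UNI_MASK set */
}
}
#endif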
/*******************************************************************************
* tpm_proc_virt_uni_trg_port_validation()
*
* DESCRIPTION:
* When the Virtual UNI_4 feature is enabled, only the following target ports are allowed:
* TPM_TRG_UNI_0 (0x0100) .. TPM_TRG_UNI_7, or the OR of all eight UNI ports together
* TPM_TRG_UNI_VIRT
* TPM_TRG_PORT_UNI_ANY (0x4000)
* TPM_TRG_PORT_CPU
* TPM_TRG_LOAD_BAL may additionally be set; it is masked out before the check.
* INPUTS:
* tpm_trg_port_type_t trg_port
* OUTPUTS:
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
int32_t tpm_proc_virt_uni_trg_port_validation(tpm_trg_port_type_t trg_port)
{
/* unset TPM_TRG_LOAD_BAL first, since it can be mixed with UNI port */
trg_port &= (~TPM_TRG_LOAD_BAL);
if ((trg_port == TPM_TRG_UNI_0) ||
(trg_port == TPM_TRG_UNI_1) ||
(trg_port == TPM_TRG_UNI_2) ||
(trg_port == TPM_TRG_UNI_3) ||
(trg_port == TPM_TRG_UNI_4) ||
(trg_port == TPM_TRG_UNI_5) ||
(trg_port == TPM_TRG_UNI_6) ||
(trg_port == TPM_TRG_UNI_7) ||
(trg_port == TPM_TRG_UNI_VIRT) ||
(trg_port == TPM_TRG_PORT_UNI_ANY) ||
(trg_port == TPM_TRG_PORT_CPU) ||
(trg_port == (TPM_TRG_UNI_0 | TPM_TRG_UNI_1 | TPM_TRG_UNI_2 | TPM_TRG_UNI_3 |
TPM_TRG_UNI_4 | TPM_TRG_UNI_5 | TPM_TRG_UNI_6 | TPM_TRG_UNI_7)))
return TPM_OK;
else
return TPM_FAIL;
}
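/*
 * Illustrative sketch (kept out of the build): typical use of the validation helper above
 * before programming a downstream rule. The variable names are hypothetical.
 */
#if 0
{
tpm_trg_port_type_t trg_port = (TPM_TRG_UNI_2 | TPM_TRG_LOAD_BAL);
int32_t rc;
/* TPM_TRG_LOAD_BAL is masked out internally, so a single UNI target passes */
rc = tpm_proc_virt_uni_trg_port_validation(trg_port);
/* rc == TPM_OK here; e.g. (TPM_TRG_UNI_0 | TPM_TRG_UNI_1) would return TPM_FAIL */
}
#endif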
/*******************************************************************************
* tpm_proc_set_RI_mh()
*
* DESCRIPTION: Sets the Marvell Header (MH) in the PnC result info (SRAM data) for
* downstream traffic forwarded to UNI port(s), when the MH source is TPM_MH_SRC_PNC_RI.
* With the virtual UNI feature enabled, the MH register is taken from the port-vector
* table in the DB; otherwise it is built from the target UNI port bits.
* INPUTS:
* rule_action
* pkt_frwd
* dir
* sram_data
* OUTPUTS:
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
int32_t tpm_proc_set_RI_mh(tpm_rule_action_t *rule_action,
tpm_pkt_frwd_t *pkt_frwd,
tpm_dir_t dir,
tpm_pncl_sram_data_t *sram_data)
{
uint32_t i;
int32_t ret_code;
tpm_db_mh_src_t ds_mh_src;
tpm_init_virt_uni_t virt_uni_info;
uint16_t amber_port_vector;
uint32_t pnc_vector, uni_vector;
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, "tpm_proc_set_RI_mh: dir(%d), trg_port(0x%x) \r\n", dir, pkt_frwd->trg_port);
tpm_db_ds_mh_get_conf_set(&ds_mh_src);
if ((SET_TARGET_PORT(rule_action->pkt_act)) &&
(TO_LAN(dir, pkt_frwd->trg_port)) && (ds_mh_src == TPM_MH_SRC_PNC_RI)) {
sram_data->sram_updt_bm |= TPM_PNCL_SET_MH_RI;
sram_data->mh_reg.mh_set = TPM_TRUE;
/* Target port validation -
not all target-port combinations are supported when the WiFi via GMAC1 (= UNI_4) feature is enabled */
ret_code = tpm_db_virt_info_get(&virt_uni_info);
if (ret_code != TPM_DB_OK) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " Virt UNI recvd ret_code(%d)\n", ret_code);
return(ret_code);
}
if ((virt_uni_info.enabled == 1) && (dir == TPM_DIR_DS)) {
/* Virt UNI feature is enabled - validate and recalculate the mh_reg */
ret_code = tpm_proc_virt_uni_trg_port_validation(pkt_frwd->trg_port);
if (ret_code != TPM_OK) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " Virt UNI trg_port validation failed. (%d)\n", ret_code);
return(ret_code);
}
/* get the MH_REG from the DB */
ret_code = tpm_db_port_vector_tbl_info_search(pkt_frwd->trg_port, &uni_vector,
&amber_port_vector, &pnc_vector);
if (ret_code != TPM_DB_OK) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD,
" Unable to retrieve port vector table from DB. (0x%x)\n", pkt_frwd->trg_port);
return(ret_code);
}
sram_data->mh_reg.mh_reg = pnc_vector;
} else {
for (i = 0; i < TPM_MAX_NUM_UNI_PORTS; i++) {
if ((pkt_frwd->trg_port & (TPM_TRG_UNI_0 << i)) ||
(pkt_frwd->trg_port & TPM_TRG_PORT_UNI_ANY))
sram_data->mh_reg.mh_reg |= (TPM_MH_RI_BIT14 << i);
}
}
}
return(TPM_OK);
}
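/*
 * Illustrative sketch (kept out of the build): how the non-virtual-UNI branch above folds
 * the target UNI port bits into the MH result-info register. The struct initialisation and
 * values are hypothetical.
 */
#if 0
{
tpm_pkt_frwd_t frwd = { 0 };
uint32_t mh_reg = 0;
uint32_t i;
frwd.trg_port = (TPM_TRG_UNI_0 | TPM_TRG_UNI_2);
for (i = 0; i < TPM_MAX_NUM_UNI_PORTS; i++) {
if (frwd.trg_port & (TPM_TRG_UNI_0 << i))
mh_reg |= (TPM_MH_RI_BIT14 << i);
}
/* mh_reg now has the UNI_0 and UNI_2 bits set (TPM_MH_RI_BIT14 and TPM_MH_RI_BIT14 << 2) */
}
#endif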
/*******************************************************************************
* tpm_owner_id_check()
*
* DESCRIPTION: The function checks if the owner_id is the valid owner of api_type
*
* INPUTS:
* owner_id - API Group owner
* api_type - API group the owner requests to act upon
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
tpm_error_code_t tpm_owner_id_check(tpm_api_type_t api_type, uint32_t owner_id)
{
/* TODO Implement */
return(TPM_OK);
}
/*******************************************************************************
* tpm_proc_add_cpu_loopback_check()
*
* DESCRIPTION: The function checks consistency of the tpm_proc_add_cpu_loopback_rule params.
*
* INPUTS:
* owner_id - See tpm_proc_add_l2_prim_acl_rule
* rule_num - See tpm_proc_add_l2_prim_acl_rule
* pkt_frwd - See tpm_proc_add_l2_prim_acl_rule
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
tpm_error_code_t tpm_proc_add_cpu_loopback_check(uint32_t owner_id, uint32_t rule_num, tpm_pkt_frwd_t *pkt_frwd)
{
int32_t ret_code;
/* Check TPM was successfully initialized */
if (!tpm_db_init_done_get())
IF_ERROR(ERR_SW_NOT_INIT);
/* Check owner_id */
ret_code = tpm_owner_id_check(TPM_API_CPU_LOOPBACK, owner_id);
if (ret_code != TPM_OK)
IF_ERROR(ERR_OWNER_INVALID);
/* Check rule_num, and api_section is active */
ret_code = tpm_proc_add_api_ent_check(TPM_CPU_LOOPBACK_ACL, TPM_RANGE_TYPE_ACL, rule_num);
if (ret_code != TPM_OK)
IF_ERROR(ERR_RULE_NUM_INVALID);
return(TPM_RC_OK);
}
/*******************************************************************************
* tpm_proc_add_l2_check()
*
* DESCRIPTION: The function checks consistency of the tpm_proc_add_l2_prim_acl_rule params.
*
* INPUTS:
* owner_id - See tpm_proc_add_l2_prim_acl_rule
* src_port - See tpm_proc_add_l2_prim_acl_rule
* rule_num - See tpm_proc_add_l2_prim_acl_rule
* parse_rule_bm - See tpm_proc_add_l2_prim_acl_rule
* l2_key - See tpm_proc_add_l2_prim_acl_rule
* pkt_frwd - See tpm_proc_add_l2_prim_acl_rule
* pkt_mod - See tpm_proc_add_l2_prim_acl_rule
* rule_action - See tpm_proc_add_l2_prim_acl_rule
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
tpm_error_code_t tpm_proc_add_l2_check(uint32_t owner_id,
tpm_src_port_type_t src_port,
uint32_t rule_num,
tpm_parse_fields_t parse_rule_bm,
tpm_l2_acl_key_t *l2_key,
tpm_parse_flags_t parse_flags_bm,
tpm_pkt_frwd_t *pkt_frwd,
tpm_pkt_mod_bm_t pkt_mod_bm,
tpm_pkt_mod_t *pkt_mod,
tpm_rule_action_t *rule_action)
{
int32_t ret_code;
tpm_dir_t dir;
tpm_db_pon_type_t pon_type;
tpm_pnc_ranges_t range_id = 0;
tpm_db_pnc_range_conf_t rangConf;
int32_t mc_vlan_free_slot;
/* Check TPM was successfully initialized */
if (!tpm_db_init_done_get())
IF_ERROR(ERR_SW_NOT_INIT);
/* Check Source Port */
ret_code = tpm_proc_src_port_check(src_port);
if (ret_code != TPM_OK)
IF_ERROR(ERR_SRC_PORT_INVALID);
/* Get Range_Id and range configuration, to get the range type */
ret_code = tpm_db_api_section_main_pnc_get(TPM_L2_PRIM_ACL, &range_id);
IF_ERROR(ret_code);
ret_code = tpm_db_pnc_rng_conf_get(range_id, &rangConf);
IF_ERROR(ret_code);
/* Get Direction, PON type, Important before other tests */
tpm_proc_src_port_dir_map(src_port, &dir);
tpm_db_pon_type_get(&pon_type);
/* Check necessary pointers are valid */
ret_code =
tpm_proc_check_missing_data(rule_action, pkt_mod, pkt_frwd, (void *)l2_key, rule_action->pkt_act,
parse_rule_bm);
IF_ERROR(ret_code);
/* Check Target_port and Queue are valid */
ret_code =
tpm_proc_check_valid_target(dir, pon_type, src_port, pkt_frwd->trg_port,
pkt_frwd->trg_queue, rule_action->pkt_act, TPM_FALSE);
IF_ERROR(ret_code);
/* Check parse_bm */
if (parse_rule_bm & (~(api_sup_param_val[TPM_ADD_L2_PRIM_ACL_RULE].sup_parse_fields))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid parse_rule_bm(0x%x) \n", parse_rule_bm);
return(ERR_PARSE_MAP_INVALID);
}
/* Check Vlan Tag TPID mask */
if (parse_rule_bm & (TPM_L2_PARSE_TWO_VLAN_TAG | TPM_L2_PARSE_ONE_VLAN_TAG)) {
if ((l2_key->vlan1.tpid_mask != 0) && (l2_key->vlan1.tpid_mask != 0xffff)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid vlan1 tpid mask(0x%x) \n", l2_key->vlan1.tpid_mask);
return(ERR_L2_KEY_INVALID);
}
if (parse_rule_bm & TPM_L2_PARSE_ONE_VLAN_TAG) {
if ((l2_key->vlan2.tpid_mask != 0) && (l2_key->vlan2.tpid_mask != 0xffff)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid vlan2 tpid mask(0x%x) \n", l2_key->vlan2.tpid_mask);
return(ERR_L2_KEY_INVALID);
}
}
}
/* Double Vlan Tag cannot be combined with PPPoE parsing - the lookup would go beyond 24 bytes with MH */
if ((parse_rule_bm & TPM_L2_PARSE_TWO_VLAN_TAG) &&
((parse_rule_bm & TPM_L2_PARSE_PPP_PROT) || (parse_rule_bm & TPM_L2_PARSE_PPPOE_SES))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Parse map of Double Vlan Tag + PPPoE not supported\n");
return(ERR_PARSE_MAP_INVALID);
}
/* Single or Double Vlan Tag cannot be combined with PPP protocol parsing - the lookup would go beyond 24 bytes with MH */
if (((parse_rule_bm & TPM_L2_PARSE_TWO_VLAN_TAG) || (parse_rule_bm & TPM_L2_PARSE_ONE_VLAN_TAG))
&& (parse_rule_bm & TPM_L2_PARSE_PPP_PROT)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Parse map of Single Vlan Tag + PPPoE proto not supported\n");
return(ERR_PARSE_MAP_INVALID);
}
/* Check parse_flags_bm */
if (parse_flags_bm & (~(api_sup_param_val[TPM_ADD_L2_PRIM_ACL_RULE].sup_parse_flags))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid parse_flags_bm (0x%x) \n", parse_flags_bm);
return(ERR_PARSE_MAP_INVALID);
}
/* Do not allow the user to create a VLANOP_NOOP L2 PnC rule without a VLAN tag parse flag */
if ((TPM_SPLIT_MOD_ENABLED == tpm_db_split_mod_get_enable()) &&
(TPM_VLAN_MOD == pkt_mod_bm) &&
(VLANOP_NOOP == pkt_mod->vlan_mod.vlan_op)) {
if (!(parse_flags_bm &
(TPM_PARSE_FLAG_TAG1_TRUE |
TPM_PARSE_FLAG_TAG2_TRUE |
TPM_PARSE_FLAG_TAG1_FALSE |
TPM_PARSE_FLAG_TAG2_FALSE))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid parse_flags_bm (0x%x), "
"when split mod VLANOP_NOOP it must TAG FLAG be set\n", parse_flags_bm);
return(ERR_PARSE_MAP_INVALID);
}
}
/* check VLAN OP p_bit mask */
if (pkt_mod_bm & TPM_VLAN_MOD) {
if (VLANOP_EXT_TAG_INS == pkt_mod->vlan_mod.vlan_op) {
if (pkt_mod->vlan_mod.vlan1_out.pbit_mask != 0xff) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid p_bit mask for VLAN Op (0x%x) \n", VLANOP_EXT_TAG_INS);
return(ERR_GENERAL);
}
}
if (VLANOP_INS_2TAG == pkt_mod->vlan_mod.vlan_op) {
if (pkt_mod->vlan_mod.vlan1_out.pbit_mask != 0xff ||
pkt_mod->vlan_mod.vlan2_out.pbit_mask != 0xff) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid p_bit mask for VLAN Op (0x%x) \n", VLANOP_INS_2TAG);
return(ERR_GENERAL);
}
}
if (VLANOP_SPLIT_MOD_PBIT == pkt_mod->vlan_mod.vlan_op) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid VLAN Op (0x%x), do not support in L2 \n", VLANOP_SPLIT_MOD_PBIT);
return(ERR_GENERAL);
}
}
/* Check parse_flags_bm - TRUE and FALSE are not set together */
ret_code = tpm_proc_check_parse_flag_valid(parse_flags_bm);
IF_ERROR(ret_code);
/* Check Packet Modification */
#if 0
tpm_proc_pkt_mod_check(rule_action->pkt_act, pkt_mod_bm, pkt_mod);
IF_ERROR(ret_code);
#endif
/* Check owner_id */
ret_code = tpm_owner_id_check(TPM_API_L2_PRIM, owner_id);
if (ret_code != TPM_OK)
IF_ERROR(ERR_OWNER_INVALID);
/* Check rule_num, and api_section is active */
ret_code = tpm_proc_add_api_ent_check(TPM_L2_PRIM_ACL, rangConf.range_type, rule_num);
if (ret_code != TPM_OK)
IF_ERROR(ERR_RULE_NUM_INVALID);
/* Check parse_rules */
/* Check gem_port only for GPON DS */
if ((parse_rule_bm & TPM_L2_PARSE_GEMPORT) && ((dir != TPM_DIR_DS) || (pon_type != TPM_GPON)))
IF_ERROR(ERR_PARSE_MAP_INVALID);
/* Check forwarding rule: only STAGE_L3_TYPE and STAGE_DONE are supported */
if (rule_action->next_phase != STAGE_L3_TYPE && rule_action->next_phase != STAGE_DONE) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " Next Phase (%d) is not supported \n", rule_action->next_phase);
return(ERR_NEXT_PHASE_INVALID);
}
/* For L2 HWF rules (not mtm, not to_cpu) that do not finish parsing here, the uni_port must be specific.
* Otherwise, rules in the following sections cannot rely on the uni_port */
if (((rule_action->pkt_act & (TPM_ACTION_TO_CPU | TPM_ACTION_MTM)) == 0) &&
(rule_action->next_phase != STAGE_DONE)) {
if (!(FROM_SPEC_UNI(src_port)) && (src_port != TPM_SRC_PORT_WAN)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD,
" For L2_HWF rule (not mtm, not to_cpu), specific uni must be specified \n");
return(ERR_SRC_PORT_INVALID);
}
}
/* Check rule action */
ret_code = tpm_proc_check_pkt_action(rule_action->pkt_act, pkt_frwd->trg_port, pkt_mod, pkt_mod_bm);
IF_ERROR(ret_code);
/* Check mc vlan set */
if (TPM_ACTION_SPEC_MC_VID & rule_action->pkt_act) {
if (src_port != TPM_SRC_PORT_WAN) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " Multicast Vlan-ID can only be assigned in downstream \n");
return(ERR_ACTION_INVALID);
}
if (0 == l2_key->vlan1.vid_mask) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " To spec MC vlan, Vlan-ID must be specified \n");
return(ERR_ACTION_INVALID);
} else if (l2_key->vlan1.vid == 0 || l2_key->vlan1.vid >= 4096) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " Invalid multicast Vlan-ID is assigned \n");
return(ERR_L2_KEY_INVALID);
}
mc_vlan_free_slot = tpm_db_mc_vlan_get_pnc_index_free_slot(l2_key->vlan1.vid, rule_num);
if (0 == mc_vlan_free_slot) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " No more free slot for l2 rule of this MC vlan\n");
return(ERR_GENERAL);
}
}
return(TPM_RC_OK);
}
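/*
 * Illustrative sketch (kept out of the build): the minimum key/action setup that satisfies
 * the multicast-VID checks at the end of the function above (WAN source, i.e. downstream,
 * and a specific VLAN ID with a non-zero mask). The field values are hypothetical.
 */
#if 0
{
tpm_l2_acl_key_t l2_key = { 0 };
tpm_rule_action_t rule_action = { 0 };
l2_key.vlan1.vid = 100;		/* must be 1..4095 */
l2_key.vlan1.vid_mask = 0xfff;	/* must be non-zero */
rule_action.pkt_act |= TPM_ACTION_SPEC_MC_VID;
/* the rule must also use src_port == TPM_SRC_PORT_WAN, and a free MC-VLAN slot
* must be available in the DB (tpm_db_mc_vlan_get_pnc_index_free_slot) */
}
#endif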
/*******************************************************************************
* tpm_proc_add_ds_load_balance_check()
*
* DESCRIPTION: The function checks consistency of the tpm_proc_add_ds_load_balance_acl_rule params.
*
* INPUTS:
* owner_id - See tpm_proc_add_ds_load_balance_acl_rule
* rule_num - See tpm_proc_add_ds_load_balance_acl_rule
* parse_rule_bm - See tpm_proc_add_ds_load_balance_acl_rule
* l2_key - See tpm_proc_add_ds_load_balance_acl_rule
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
tpm_error_code_t tpm_proc_add_ds_load_balance_check(uint32_t owner_id,
uint32_t rule_num,
tpm_parse_fields_t parse_rule_bm,
tpm_parse_flags_t parse_flags_bm,
tpm_l2_acl_key_t *l2_key)
{
int32_t ret_code;
tpm_pnc_ranges_t range_id = 0;
tpm_db_pnc_range_conf_t rangConf;
/* Check TPM was successfully initialized */
if (!tpm_db_init_done_get())
IF_ERROR(ERR_SW_NOT_INIT);
/* Get Range_Id and range configuration, to get the range type */
ret_code = tpm_db_api_section_main_pnc_get(TPM_DS_LOAD_BALANCE_ACL, &range_id);
IF_ERROR(ret_code);
ret_code = tpm_db_pnc_rng_conf_get(range_id, &rangConf);
IF_ERROR(ret_code);
/* Check necessary pointers are valid */
if ((l2_key == NULL) && (parse_rule_bm != 0)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Parsing requested with NULL pointer\n");
return(ERR_FRWD_INVALID);
}
IF_ERROR(ret_code);
/* Check parse_rule_bm */
if (parse_rule_bm & (~(api_sup_param_val[TPM_ADD_DS_LOAD_BALANCE_RULE].sup_parse_fields))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid parse_rule_bm(0x%x) \n", parse_rule_bm);
return(ERR_PARSE_MAP_INVALID);
}
/* Check parse_flag_bm */
if (parse_flags_bm & (~(api_sup_param_val[TPM_ADD_DS_LOAD_BALANCE_RULE].sup_parse_flags))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid parse_flags_bm(0x%x) \n", parse_flags_bm);
return(ERR_PARSE_MAP_INVALID);
}
/* Check Vlan Tag TPID mask */
if (parse_rule_bm & (TPM_L2_PARSE_TWO_VLAN_TAG | TPM_L2_PARSE_ONE_VLAN_TAG)) {
if ((l2_key->vlan1.tpid_mask != 0) && (l2_key->vlan1.tpid_mask != 0xffff)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid vlan1 tpid mask(0x%x) \n", l2_key->vlan1.tpid_mask);
return(ERR_L2_KEY_INVALID);
}
if (parse_rule_bm & TPM_L2_PARSE_ONE_VLAN_TAG) {
if ((l2_key->vlan2.tpid_mask != 0) && (l2_key->vlan2.tpid_mask != 0xffff)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid vlan2 tpid mask(0x%x) \n", l2_key->vlan2.tpid_mask);
return(ERR_L2_KEY_INVALID);
}
}
}
/* Double Vlan Tag cannot be combined with PPPoE parsing - the lookup would go beyond 24 bytes with MH */
if ((parse_rule_bm & TPM_L2_PARSE_TWO_VLAN_TAG) &&
((parse_rule_bm & TPM_L2_PARSE_PPP_PROT) || (parse_rule_bm & TPM_L2_PARSE_PPPOE_SES))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Parse map of Double Vlan Tag + PPPoE not supported\n");
return(ERR_PARSE_MAP_INVALID);
}
/* Single or Double Vlan Tag cannot be combined with PPP protocol parsing - the lookup would go beyond 24 bytes with MH */
if (((parse_rule_bm & TPM_L2_PARSE_TWO_VLAN_TAG) || (parse_rule_bm & TPM_L2_PARSE_ONE_VLAN_TAG))
&& (parse_rule_bm & TPM_L2_PARSE_PPP_PROT)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Parse map of Single Vlan Tag + PPPoE proto not supported\n");
return(ERR_PARSE_MAP_INVALID);
}
/* Check owner_id */
ret_code = tpm_owner_id_check(TPM_API_DS_LOAD_BALANCE, owner_id);
if (ret_code != TPM_OK)
IF_ERROR(ERR_OWNER_INVALID);
/* Check rule_num, and api_section is active */
ret_code = tpm_proc_add_api_ent_check(TPM_DS_LOAD_BALANCE_ACL, rangConf.range_type, rule_num);
if (ret_code != TPM_OK)
IF_ERROR(ERR_RULE_NUM_INVALID);
return(TPM_RC_OK);
}
tpm_error_code_t tpm_proc_add_ipv6_gen_check(uint32_t owner_id,
tpm_src_port_type_t src_port,
uint32_t rule_num,
tpm_parse_fields_t parse_rule_bm,
tpm_parse_flags_t parse_flags_bm,
tpm_ipv6_gen_acl_key_t *ipv6_gen_key,
tpm_pkt_frwd_t *pkt_frwd,
tpm_pkt_mod_bm_t pkt_mod_bm,
tpm_pkt_mod_t *pkt_mod,
tpm_rule_action_t *rule_action)
{
int32_t ret_code;
tpm_dir_t dir;
tpm_db_pon_type_t pon_type;
tpm_db_pnc_range_t range_data;
tpm_init_ipv6_5t_enable_t ipv6_5t_enable;
/* Check TPM was successfully initialized */
if (!tpm_db_init_done_get())
IF_ERROR(ERR_SW_NOT_INIT);
/* Check 5_tuple feature is disabled */
tpm_db_ipv6_5t_enable_get(&ipv6_5t_enable);
if (ipv6_5t_enable != TPM_IPV6_5T_DISABLED)
return ERR_IPV6_API_ILLEGAL_CALL;
/* Check Source Port */
ret_code = tpm_proc_src_port_check(src_port);
if (ret_code != TPM_OK)
IF_ERROR(ERR_SRC_PORT_INVALID);
/* Get Direction, PON type, Important before other tests */
tpm_proc_src_port_dir_map(src_port, &dir);
tpm_db_pon_type_get(&pon_type);
/* Check necessary pointers are valid */
ret_code =
tpm_proc_check_missing_data(rule_action, pkt_mod, pkt_frwd, (void *)ipv6_gen_key, rule_action->pkt_act,
parse_rule_bm);
IF_ERROR(ret_code);
/* Check Target_port and Queue are valid */
ret_code =
tpm_proc_check_valid_target(dir, pon_type, src_port, pkt_frwd->trg_port,
pkt_frwd->trg_queue, rule_action->pkt_act, TPM_FALSE);
IF_ERROR(ret_code);
/* Check parse_bm */
if (parse_rule_bm & (~(api_sup_param_val[TPM_ADD_IPV6_GEN_ACL_RULE].sup_parse_fields))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid parse_rule_bm(0x%x) \n", parse_rule_bm);
return(ERR_PARSE_MAP_INVALID);
}
/* Check parse_flags_bm */
if (parse_flags_bm & (~(api_sup_param_val[TPM_ADD_IPV6_GEN_ACL_RULE].sup_parse_flags))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid parse_flags_bm (0x%x) \n", parse_flags_bm);
return(ERR_PARSE_MAP_INVALID);
}
/* Check parse_flags_bm - TRUE and FALSE are not set together */
ret_code = tpm_proc_check_parse_flag_valid(parse_flags_bm);
IF_ERROR(ret_code);
/* Check Packet Modification */
#if 0
tpm_proc_pkt_mod_check(rule_action->pkt_act, pkt_mod_bm, pkt_mod);
IF_ERROR(ret_code);
#endif
/* Check owner_id */
ret_code = tpm_owner_id_check(TPM_API_IPV6_GEN, owner_id);
if (ret_code != TPM_OK)
IF_ERROR(ERR_OWNER_INVALID);
/* Check rule_num, and api_section is active */
tpm_db_pnc_rng_get(TPM_PNC_IPV6_GEN, &range_data);
ret_code = tpm_proc_add_api_ent_check(TPM_IPV6_GEN_ACL, range_data.pnc_range_conf.range_type, rule_num);
if (ret_code != TPM_OK)
IF_ERROR(ERR_RULE_NUM_INVALID);
if (TPM_RANGE_TYPE_TABLE == range_data.pnc_range_conf.range_type) {
if ((rule_num < range_data.pnc_range_conf.api_start) || (rule_num > range_data.pnc_range_conf.api_end))
IF_ERROR(ERR_RULE_NUM_INVALID);
}
/* Check forwarding rule: only STAGE_IPv6_DIP and STAGE_DONE are supported */
if (rule_action->next_phase != STAGE_IPv6_DIP && rule_action->next_phase != STAGE_DONE) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " Next Phase (%d) is not supported \n", rule_action->next_phase);
return(ERR_NEXT_PHASE_INVALID);
}
/* Check rule action */
ret_code = tpm_proc_check_pkt_action(rule_action->pkt_act, pkt_frwd->trg_port, pkt_mod, pkt_mod_bm);
IF_ERROR(ret_code);
if (rule_action->pkt_act & api_sup_param_val[TPM_ADD_IPV6_GEN_ACL_RULE].forbidden_actions) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Packet Action (0x%x) includes forbidden action\n", rule_action->pkt_act);
return(ERR_ACTION_INVALID);
}
return(TPM_RC_OK);
}
tpm_error_code_t tpm_proc_add_ipv6_dip_check(uint32_t owner_id,
tpm_src_port_type_t src_port,
uint32_t rule_num,
tpm_parse_fields_t parse_rule_bm,
tpm_parse_flags_t parse_flags_bm,
tpm_ipv6_addr_key_t *ipv6_dip_key,
tpm_pkt_frwd_t *pkt_frwd,
tpm_pkt_mod_bm_t pkt_mod_bm,
tpm_pkt_mod_t *pkt_mod,
tpm_rule_action_t *rule_action)
{
int32_t ret_code;
tpm_dir_t dir;
tpm_db_pon_type_t pon_type;
tpm_db_pnc_range_t range_data;
tpm_init_ipv6_5t_enable_t ipv6_5t_enable;
/* Check TPM was successfully initialized */
if (!tpm_db_init_done_get())
IF_ERROR(ERR_SW_NOT_INIT);
/* Check 5_tuple feature is disabled */
tpm_db_ipv6_5t_enable_get(&ipv6_5t_enable);
if (ipv6_5t_enable != TPM_IPV6_5T_DISABLED)
return ERR_IPV6_API_ILLEGAL_CALL;
/* Check Source Port */
ret_code = tpm_proc_src_port_check(src_port);
if (ret_code != TPM_OK)
IF_ERROR(ERR_SRC_PORT_INVALID);
/* Get Direction, PON type, Important before other tests */
tpm_proc_src_port_dir_map(src_port, &dir);
tpm_db_pon_type_get(&pon_type);
/* Check necessary pointers are valid */
ret_code =
tpm_proc_check_missing_data(rule_action, pkt_mod, pkt_frwd, (void *)ipv6_dip_key, rule_action->pkt_act,
parse_rule_bm);
IF_ERROR(ret_code);
/* Check Target_port and Queue are valid */
ret_code =
tpm_proc_check_valid_target(dir, pon_type, src_port, pkt_frwd->trg_port,
pkt_frwd->trg_queue, rule_action->pkt_act, TPM_FALSE);
IF_ERROR(ret_code);
/* Check parse_rule_bm */
if (parse_rule_bm & (~(api_sup_param_val[TPM_ADD_IPV6_DIP_ACL_RULE].sup_parse_fields))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid parse_rule_bm(0x%x) \n", parse_rule_bm);
return(ERR_PARSE_MAP_INVALID);
}
/* Check parse_flags_bm */
if (parse_flags_bm & (~(api_sup_param_val[TPM_ADD_IPV6_DIP_ACL_RULE].sup_parse_flags))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid parse_flags_bm (0x%x) \n", parse_flags_bm);
return(ERR_PARSE_MAP_INVALID);
}
/* Check parse_flags_bm - TRUE and FALSE are not set together */
ret_code = tpm_proc_check_parse_flag_valid(parse_flags_bm);
IF_ERROR(ret_code);
/* Check Packet Modification */
#if 0
tpm_proc_pkt_mod_check(rule_action->pkt_act, pkt_mod_bm, pkt_mod);
IF_ERROR(ret_code);
#endif
/* Check owner_id */
ret_code = tpm_owner_id_check(TPM_API_IPV6_DIP, owner_id);
if (ret_code != TPM_OK)
IF_ERROR(ERR_OWNER_INVALID);
/* Check rule_num, and api_section is active */
tpm_db_pnc_rng_get(TPM_PNC_IPV6_DIP, &range_data);
ret_code = tpm_proc_add_api_ent_check(TPM_IPV6_DIP_ACL, range_data.pnc_range_conf.range_type, rule_num);
if (ret_code != TPM_OK)
IF_ERROR(ERR_RULE_NUM_INVALID);
if (TPM_RANGE_TYPE_TABLE == range_data.pnc_range_conf.range_type) {
if ((rule_num < range_data.pnc_range_conf.api_start) || (rule_num > range_data.pnc_range_conf.api_end))
IF_ERROR(ERR_RULE_NUM_INVALID);
}
/* Check forwarding rule: currently only STAGE_DONE is supported */
if (rule_action->next_phase != STAGE_DONE) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " Next Phase (%d) is not supported \n", rule_action->next_phase);
return(ERR_NEXT_PHASE_INVALID);
}
/* Check rule action */
ret_code = tpm_proc_check_pkt_action(rule_action->pkt_act, pkt_frwd->trg_port, pkt_mod, pkt_mod_bm);
IF_ERROR(ret_code);
if (rule_action->pkt_act & api_sup_param_val[TPM_ADD_IPV6_DIP_ACL_RULE].forbidden_actions) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Packet Action (0x%x) includes forbidden action\n", rule_action->pkt_act);
return(ERR_ACTION_INVALID);
}
return(TPM_RC_OK);
}
tpm_error_code_t tpm_proc_add_ipv6_l4ports_check(uint32_t owner_id,
tpm_src_port_type_t src_port,
uint32_t rule_num,
tpm_parse_fields_t parse_rule_bm,
tpm_parse_flags_t parse_flags_bm,
tpm_l4_ports_key_t *l4_key,
tpm_pkt_frwd_t *pkt_frwd,
tpm_pkt_mod_bm_t pkt_mod_bm,
tpm_pkt_mod_t *pkt_mod,
tpm_rule_action_t *rule_action)
{
int32_t ret_code;
tpm_dir_t dir;
tpm_db_pon_type_t pon_type;
tpm_db_pnc_range_t range_data;
tpm_init_ipv6_5t_enable_t ipv6_5t_enable;
/* Check TPM was successfully initialized */
if (!tpm_db_init_done_get())
IF_ERROR(ERR_SW_NOT_INIT);
/* Check 5_tuple feature is disabled */
tpm_db_ipv6_5t_enable_get(&ipv6_5t_enable);
if (ipv6_5t_enable != TPM_IPV6_5T_DISABLED)
return ERR_IPV6_API_ILLEGAL_CALL;
/* Check Source Port */
ret_code = tpm_proc_src_port_check(src_port);
if (ret_code != TPM_OK)
IF_ERROR(ERR_SRC_PORT_INVALID);
/* Get Direction, PON type, Important before other tests */
tpm_proc_src_port_dir_map(src_port, &dir);
tpm_db_pon_type_get(&pon_type);
/* Check necessary pointers are valid */
ret_code =
tpm_proc_check_missing_data(rule_action, pkt_mod, pkt_frwd, (void *)l4_key, rule_action->pkt_act,
parse_rule_bm);
IF_ERROR(ret_code);
/* Check Target_port and Queue are valid */
ret_code =
tpm_proc_check_valid_target(dir, pon_type, src_port, pkt_frwd->trg_port,
pkt_frwd->trg_queue, rule_action->pkt_act, TPM_FALSE);
IF_ERROR(ret_code);
/* Check parse_rule_bm */
if (parse_rule_bm & (~(api_sup_param_val[TPM_ADD_IPV6_L4_PORTS_ACL_RULE].sup_parse_fields))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid parse_rule_bm(0x%x) \n", parse_rule_bm);
return(ERR_PARSE_MAP_INVALID);
}
/* Check parse_flags_bm */
if (parse_flags_bm & (~(api_sup_param_val[TPM_ADD_IPV6_L4_PORTS_ACL_RULE].sup_parse_flags))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid parse_flags_bm (0x%x) \n", parse_flags_bm);
return(ERR_PARSE_MAP_INVALID);
}
/* Check parse_flags_bm - TRUE and FALSE are not set together */
ret_code = tpm_proc_check_parse_flag_valid(parse_flags_bm);
IF_ERROR(ret_code);
/* Check Packet Modification */
#if 0
tpm_proc_pkt_mod_check(rule_action->pkt_act, pkt_mod_bm, pkt_mod);
IF_ERROR(ret_code);
#endif
/* Check owner_id */
ret_code = tpm_owner_id_check(TPM_API_IPV6_L4, owner_id);
if (ret_code != TPM_OK)
IF_ERROR(ERR_OWNER_INVALID);
/* Check rule_num, and api_section is active */
tpm_db_pnc_rng_get(TPM_PNC_IPV6_L4, &range_data);
ret_code = tpm_proc_add_api_ent_check(TPM_L4_ACL, range_data.pnc_range_conf.range_type, rule_num);
if (ret_code != TPM_OK)
IF_ERROR(ERR_RULE_NUM_INVALID);
if (TPM_RANGE_TYPE_TABLE == range_data.pnc_range_conf.range_type) {
if ((rule_num < range_data.pnc_range_conf.api_start) || (rule_num > range_data.pnc_range_conf.api_end))
IF_ERROR(ERR_RULE_NUM_INVALID);
}
/* Check forwarding rule: STAGE_IPv6_GEN, STAGE_DONE and STAGE_CTC_CM are supported */
if (rule_action->next_phase != STAGE_IPv6_GEN && rule_action->next_phase != STAGE_DONE
&& rule_action->next_phase != STAGE_CTC_CM) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " Next Phase (%d) is not supported \n", rule_action->next_phase);
return(ERR_NEXT_PHASE_INVALID);
}
/* Check rule action */
ret_code = tpm_proc_check_pkt_action(rule_action->pkt_act, pkt_frwd->trg_port, pkt_mod, pkt_mod_bm);
IF_ERROR(ret_code);
if (rule_action->pkt_act & api_sup_param_val[TPM_ADD_IPV6_L4_PORTS_ACL_RULE].forbidden_actions) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Packet Action (0x%x) includes forbidden action\n", rule_action->pkt_act);
return(ERR_ACTION_INVALID);
}
/* Check if next stage CTC CnM */
if ((rule_action->next_phase == STAGE_CTC_CM) && !(parse_flags_bm & TPM_PARSE_FLAG_MTM_FALSE)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "next stage is CnM, packets must be MTM_FALSE\n");
return(ERR_NEXT_PHASE_INVALID);
}
return(TPM_RC_OK);
}
tpm_error_code_t tpm_proc_add_ipv6_nh_check(uint32_t owner_id,
uint32_t rule_num,
tpm_nh_iter_t nh_iter,
tpm_parse_flags_t parse_flags_bm,
uint32_t nh,
tpm_pkt_frwd_t *pkt_frwd,
tpm_rule_action_t *rule_action)
{
int32_t ret_code;
tpm_db_pnc_range_t range_data;
tpm_pkt_mod_t pkt_mod;
tpm_pkt_mod_bm_t pkt_mod_bm = 0;
tpm_db_ds_mac_based_trunk_enable_t ds_mac_based_trunk_enable;
/* Check TPM was successfully initialized */
if (!tpm_db_init_done_get())
IF_ERROR(ERR_SW_NOT_INIT);
/* Check keys exist for parse fields */
if (rule_action == NULL) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "rule_action cannot be NULL\n");
return(ERR_ACTION_INVALID);
}
if ((pkt_frwd == NULL) && ((SET_TARGET_PORT(rule_action->pkt_act)) || SET_TARGET_QUEUE(rule_action->pkt_act))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Target set requested with NULL pointer\n");
return(ERR_FRWD_INVALID);
}
/* Check valid target: forwarding is only allowed towards the CPU */
if (SET_TARGET_PORT(rule_action->pkt_act)) {
if (!(TO_CPU(pkt_frwd->trg_port))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "* trg_port=%d *\r\n", pkt_frwd->trg_port);
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Illegal Target Port\n");
return(ERR_FRWD_INVALID);
}
}
/* Check parse_flags_bm - TRUE and FALSE are not set together */
ret_code = tpm_proc_check_parse_flag_valid(parse_flags_bm);
IF_ERROR(ret_code);
/* Check Valid Queue */
/* TODO - Check Queue depending on actual queues in target or in Rx */
if (SET_TARGET_QUEUE(rule_action->pkt_act) && (pkt_frwd->trg_queue >= TPM_MAX_NUM_TX_QUEUE)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Target Queue Out of Range\n");
return(ERR_FRWD_INVALID);
}
/* Check rule_num, and api_section is active */
tpm_db_pnc_rng_get(TPM_PNC_IPV6_NH, &range_data);
ret_code = tpm_proc_add_api_ent_check(TPM_IPV6_NH_ACL, range_data.pnc_range_conf.range_type, rule_num);
if (ret_code != TPM_OK)
IF_ERROR(ERR_RULE_NUM_INVALID);
if (TPM_RANGE_TYPE_TABLE == range_data.pnc_range_conf.range_type) {
if ((rule_num < range_data.pnc_range_conf.api_start) || (rule_num > range_data.pnc_range_conf.api_end))
IF_ERROR(ERR_RULE_NUM_INVALID);
}
/* Check forwarding rule */
if (rule_action->next_phase != STAGE_IPV6_L4 &&
rule_action->next_phase != STAGE_IPv6_NH &&
rule_action->next_phase != STAGE_IPv6_GEN &&
rule_action->next_phase != STAGE_CTC_CM &&
rule_action->next_phase != STAGE_DONE) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " Next Phase (%d) is not supported \n", rule_action->next_phase);
return(ERR_NEXT_PHASE_INVALID);
}
if (rule_action->next_phase == STAGE_IPV6_L4) {
if (nh != IPPROTO_UDP && nh != IPPROTO_TCP) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " Next Phase is IPV6_L4 while NH(%d) is not UDP or TCP \n", nh);
return(ERR_NEXT_PHASE_INVALID);
}
}
if (rule_action->next_phase == STAGE_IPv6_NH && nh_iter == NH_ITER_1) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " Multiple Ext Hdr is not supported \n");
return(ERR_NEXT_PHASE_INVALID);
}
/* Check if next stage CTC CnM */
if (rule_action->next_phase == STAGE_CTC_CM) {
if (nh == IPPROTO_UDP || nh == IPPROTO_TCP) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Next Phase is CTC_CM while NH(%d) is UDP or TCP \n", nh);
return(ERR_NEXT_PHASE_INVALID);
}
if (0 == (parse_flags_bm & TPM_PARSE_FLAG_MTM_FALSE)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Next stage is CnM, packets must be MTM_FALSE\n");
return(ERR_NEXT_PHASE_INVALID);
}
}
/* Check rule action */
ret_code = tpm_proc_check_pkt_action(rule_action->pkt_act, pkt_frwd->trg_port, &pkt_mod, pkt_mod_bm);
IF_ERROR(ret_code);
if (rule_action->pkt_act & api_sup_param_val[TPM_ADD_IPV6_NH_ACL_RULE].forbidden_actions) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Packet Action (0x%x) includes forbidden action\n", rule_action->pkt_act);
return(ERR_ACTION_INVALID);
}
/* When DS load balance on G0 and G1 is enabled, a second NH rule (NH_ITER_1) cannot be added */
tpm_db_ds_mac_based_trunk_enable_get(&ds_mac_based_trunk_enable);
if ( (TPM_DS_MAC_BASED_TRUNK_ENABLED == ds_mac_based_trunk_enable)
&& (NH_ITER_1 == nh_iter)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "when ds load balance on G0 and G1 is enabled, "
"no 2 NH rule can be added\n");
return(ERR_FEAT_UNSUPPORT);
}
return(TPM_RC_OK);
}
/*******************************************************************************
* tpm_proc_add_ipv4_check()
*
* DESCRIPTION: The function checks consistency of the tpm_proc_add_ipv4_acl_rule params.
*
* INPUTS:
* - See tpm_proc_add_ipv4_acl_rule
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
tpm_error_code_t tpm_proc_add_ipv4_check(uint32_t owner_id,
tpm_src_port_type_t src_port,
uint32_t rule_num,
tpm_parse_fields_t parse_rule_bm,
tpm_parse_flags_t parse_flags_bm,
tpm_ipv4_acl_key_t *ipv4_key,
tpm_pkt_frwd_t *pkt_frwd,
tpm_pkt_mod_t *pkt_mod,
tpm_rule_action_t *rule_action,
tpm_pkt_mod_bm_t pkt_mod_bm)
{
int32_t ret_code;
tpm_dir_t dir;
tpm_db_pon_type_t pon_type;
tpm_pnc_ranges_t range_id = 0;
tpm_db_pnc_range_conf_t rangConf;
/* Check TPM was successfully initialized */
if (!tpm_db_init_done_get())
IF_ERROR(ERR_SW_NOT_INIT);
/* Check Source Port */
ret_code = tpm_proc_src_port_check(src_port);
if (ret_code != TPM_OK)
IF_ERROR(ERR_SRC_PORT_INVALID);
/* Get Direction, PON type, Important before other tests */
tpm_proc_src_port_dir_map(src_port, &dir);
tpm_db_pon_type_get(&pon_type);
/* Get Range_Id and range configuration, to get the range type */
ret_code = tpm_db_api_section_main_pnc_get(TPM_IPV4_ACL, &range_id);
IF_ERROR(ret_code);
ret_code = tpm_db_pnc_rng_conf_get(range_id, &rangConf);
IF_ERROR(ret_code);
/* Check necessary pointers are valid */
ret_code =
tpm_proc_check_missing_data(rule_action, pkt_mod, pkt_frwd, (void *)ipv4_key, rule_action->pkt_act,
parse_rule_bm);
IF_ERROR(ret_code);
/* Check Target_port and Queue are valid */
ret_code =
tpm_proc_check_valid_target(dir, pon_type, src_port, pkt_frwd->trg_port,
pkt_frwd->trg_queue, rule_action->pkt_act, TPM_TRUE);
IF_ERROR(ret_code);
/* Check Packet Modification */
#if 0
tpm_proc_pkt_mod_check(rule_action->pkt_act, pkt_mod_bm, pkt_mod);
IF_ERROR(ret_code);
#endif
/* Check parse_bm */
if (parse_rule_bm & (~(api_sup_param_val[TPM_ADD_IPV4_ACL_RULE].sup_parse_fields))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid parse_rule_bm(0x%x) \n", parse_rule_bm);
return(ERR_PARSE_MAP_INVALID);
}
/* Check parse_flags_bm */
if (parse_flags_bm & (~(api_sup_param_val[TPM_ADD_IPV4_ACL_RULE].sup_parse_flags))) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Invalid parse_flags_bm (0x%x) \n", parse_flags_bm);
return(ERR_PARSE_MAP_INVALID);
}
/* Check parse_flags_bm - TRUE and FALSE are not set together */
ret_code = tpm_proc_check_parse_flag_valid(parse_flags_bm);
IF_ERROR(ret_code);
/* Check owner_id */
ret_code = tpm_owner_id_check(TPM_API_IPV4, owner_id);
if (ret_code != TPM_OK)
IF_ERROR(ERR_OWNER_INVALID);
/* Check rule_num, and api_section is active */
ret_code = tpm_proc_add_api_ent_check(TPM_IPV4_ACL, rangConf.range_type, rule_num);
if (ret_code != TPM_OK)
IF_ERROR(ERR_RULE_NUM_INVALID);
/* Check forwarding rule: STAGE_DONE and STAGE_CTC_CM are supported */
if ( rule_action->next_phase != STAGE_DONE
&& rule_action->next_phase != STAGE_CTC_CM) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, " Next Phase (%d) is not supported \n", rule_action->next_phase);
return(ERR_NEXT_PHASE_INVALID);
}
/* Check rule action */
ret_code = tpm_proc_check_pkt_action(rule_action->pkt_act, pkt_frwd->trg_port, pkt_mod, pkt_mod_bm);
IF_ERROR(ret_code);
if (rule_action->pkt_act & api_sup_param_val[TPM_ADD_IPV4_ACL_RULE].forbidden_actions) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "Packet Action (0x%x) includes forbidden action\n", rule_action->pkt_act);
return(ERR_ACTION_INVALID);
}
if (rule_action->next_phase == STAGE_CTC_CM) {
if (!tpm_ctc_cm_ipv4_rule2cm(parse_flags_bm, rule_action)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "next stage is CTC_CM, parse flag must be MTM_FALSE, and action cannot be TO_CPU\n");
return(ERR_NEXT_PHASE_INVALID);
}
if (FROM_WAN(src_port)) {
TPM_OS_ERROR(TPM_TPM_LOG_MOD, "next stage is CTC_CM, Src Port cannot be WAN\n");
return(ERR_SRC_PORT_INVALID);
}
}
return(TPM_RC_OK);
}
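/*
 * Illustrative sketch (kept out of the build): the flag/action combination accepted by the
 * STAGE_CTC_CM branch above for an IPv4 rule - MTM_FALSE must be set, the action must not
 * be TO_CPU, and the source port must not be a WAN port. Values are hypothetical.
 */
#if 0
{
tpm_rule_action_t rule_action = { 0 };
tpm_parse_flags_t parse_flags_bm = TPM_PARSE_FLAG_MTM_FALSE;
rule_action.next_phase = STAGE_CTC_CM;
/* rule_action.pkt_act must not include TPM_ACTION_TO_CPU, and src_port
* must be a LAN/UNI port (FROM_WAN(src_port) must be false) */
}
#endif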
/*******************************************************************************
* tpm_proc_del_l2_check()
*
* DESCRIPTION: The function checks consistency of the tpm_proc_del_l2_prim_acl_rule params.
*
* INPUTS:
* owner_id - See tpm_proc_del_l2_prim_acl_rule
* src_port - See tpm_proc_del_l2_prim_acl_rule
* rule_idx - See tpm_proc_del_l2_prim_acl_rule
* parse_rule_bm - See tpm_proc_del_l2_prim_acl_rule
* l2_key - See tpm_proc_del_l2_prim_acl_rule
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
tpm_error_code_t tpm_proc_del_l2_check(uint32_t owner_id,
tpm_src_port_type_t src_port,
uint32_t rule_idx,
tpm_parse_fields_t parse_rule_bm,
tpm_l2_acl_key_t *l2_key)
{
int32_t ret_code;
uint32_t rule_num;
tpm_dir_t dir;
/* Check TPM was successfully initialized */
if (!tpm_db_init_done_get())
IF_ERROR(ERR_SW_NOT_INIT);
/* check that rule_idx or parse_bm or l2_key are valid - for deletion */
if ((rule_idx == 0) && ((l2_key == NULL) || (parse_rule_bm == 0)))
IF_ERROR(ERR_DELETE_KEY_INVALID);
/* Check owner_id */
ret_code = tpm_owner_id_check(TPM_API_L2_PRIM, owner_id);
if (ret_code != TPM_OK)
IF_ERROR(ERR_OWNER_INVALID);
/* Check Source Port */
ret_code = tpm_proc_src_port_check(src_port);
if (ret_code != TPM_OK)
IF_ERROR(ERR_SRC_PORT_INVALID);
/* Get Direction, Important before other tests */
tpm_proc_src_port_dir_map(src_port, &dir);
/* Check valid rule_idx */
if (rule_idx != 0) { /* when rule_idx = 0 -> caller indicates to work according to the l2_key */
ret_code = tpm_db_api_rulenum_get(TPM_L2_PRIM_ACL, rule_idx, &rule_num);
if (ret_code != TPM_OK)
IF_ERROR(ERR_RULE_IDX_INVALID);
}
return(TPM_OK);
}
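/*
 * Illustrative sketch (kept out of the build): the two deletion modes accepted by the check
 * above - either a non-zero rule_idx, or rule_idx == 0 together with an l2_key and a
 * non-empty parse_rule_bm. The owner id, rule index and key values are hypothetical.
 */
#if 0
{
tpm_l2_acl_key_t l2_key = { 0 };
uint32_t owner_id = 0;
/* Mode 1: delete by rule index (key and parse_rule_bm may be empty) */
tpm_proc_del_l2_check(owner_id, TPM_SRC_PORT_UNI_0, 5, 0, NULL);
/* Mode 2: delete by key (rule_idx == 0 requires a key and a parse bitmap) */
l2_key.vlan1.vid = 100;
l2_key.vlan1.vid_mask = 0xfff;
tpm_proc_del_l2_check(owner_id, TPM_SRC_PORT_UNI_0, 0, TPM_L2_PARSE_ONE_VLAN_TAG, &l2_key);
}
#endif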
/*******************************************************************************
* tpm_proc_common_pncl_info_get()
*
* DESCRIPTION: Maps a PnC range to its base lookup-id and to the logical TCAM
* start offset used when building entries for that range.
*
* INPUTS:
* range_id - PnC range of the API acl/table
*
* OUTPUTS:
* lu_id - LookupId of this PnC range
* start_offset - Logical TCAM start offset of this PnC range
*
* RETURNS:
* On success, the function returns TPM_OK. On error different types are returned
* according to the case - see tpm_db_err_t.
*
* COMMENTS:
*
*******************************************************************************/
int32_t tpm_proc_common_pncl_info_get(tpm_pnc_ranges_t range_id, uint32_t *lu_id, tpm_pncl_offset_t *start_offset)
{
int32_t ret_code;
tpm_db_pnc_range_conf_t range_conf;
TPM_OS_DEBUG(TPM_TPM_LOG_MOD, "range_id(%d)\n", range_id);
ret_code = tpm_db_pnc_rng_conf_get(range_id, &range_conf);
IF_ERROR(ret_code);
*lu_id = range_conf.base_lu_id;
start_offset->range_id = range_id;
if ( range_id == TPM_PNC_L2_MAIN || range_id == TPM_PNC_MAC_LEARN
|| range_id == TPM_PNC_DS_LOAD_BALANCE) {
start_offset->offset_base = TPM_PNCL_ZERO_OFFSET;
start_offset->offset_sub.subf = TPM_L2_PARSE_MH;
} else if (range_id == TPM_PNC_ETH_TYPE) {
start_offset->offset_base = TPM_PNCL_L3_OFFSET;
start_offset->offset_sub.subf = TPM_L2_PARSE_ETYPE;
} else if ((range_id == TPM_PNC_IPV4_MAIN)
|| (range_id == TPM_PNC_IPV4_MC_DS)
|| (range_id == TPM_PNC_IPV4_PROTO)
|| (range_id == TPM_PNC_CNM_IPV4_PRE)
|| (range_id == TPM_PNC_IGMP)) {
start_offset->offset_base = TPM_PNCL_IPV4_OFFSET;
start_offset->offset_sub.subf = TPM_IPv4_PARSE_VER_OR_IHL;
} else if ( (range_id == TPM_PNC_IPV6_NH)
|| (range_id == TPM_PNC_IPV6_GEN)
|| (range_id == TPM_PNC_IPV6_MC_SIP)) {
start_offset->offset_base = TPM_PNCL_IPV6_OFFSET;
start_offset->