      1 #include "fw_pvt.h"
      2 #include "viddec_fw_parser_ipclib_config.h"
      3 #include "viddec_fw_common_defs.h"
      4 #include "viddec_fw_parser.h"
      5 #include "viddec_fw_debug.h"
      6 
      7 /* This section attribute makes sure that the structure is stored in local memory.
      8    This is shared memory between the host and the FW. */
      9 volatile dmem_t _dmem __attribute__ ((section (".exchange")));
     10 /* Debug index should be disabled for production FW */
     11 uint32_t dump_ptr=0;
     12 uint32_t timer=0;
     13 
     14 /* Auto Api definitions */
     15 ismd_api_group viddec_fw_api_array[2];
     16 
     17 extern void viddec_fw_parser_register_callbacks(void);
     18 
     19 /*------------------------------------------------------------------------------
     20  * Function:  initialize firmware SVEN TX Output
     21  *------------------------------------------------------------------------------
     22  */
     23 int SMDEXPORT viddec_fw_parser_sven_init(struct SVEN_FW_Globals  *sven_fw_globals )
     24 {
     25     extern int sven_fw_set_globals(struct SVEN_FW_Globals  *fw_globals );
     26     return(sven_fw_set_globals(sven_fw_globals));
     27 }
     28 
     29 /*------------------------------------------------------------------------------
     30  * Function:  viddec_fw_check_watermark_boundary
     31  * This function figures out if we crossed the watermark boundary on input data.
     32  * before represents the ES queue data when we started and current represents the ES queue data
     33  * when we are ready to swap. threshold is the amount of data specified by the driver to trigger an
     34  * interrupt.
     35  * We return true if threshold lies between before and current.
     36  *------------------------------------------------------------------------------
     37  */
     38 static inline uint32_t viddec_fw_check_watermark_boundary(uint32_t before, uint32_t current, uint32_t threshold)
     39 {
     40     return ((before >= threshold) && (current < threshold));
     41 }
     42 
     43 /*------------------------------------------------------------------------------
     44  * Function:  viddec_fw_get_total_input_Q_data
     45  * This function figures out how much data is available in the input queue of the FW.
     46  *------------------------------------------------------------------------------
     47  */
     48 static uint32_t viddec_fw_get_total_input_Q_data(uint32_t indx)
     49 {
     50     FW_IPC_Handle *fwipc = GET_IPC_HANDLE(_dmem);
     51     uint32_t ret;
     52     int32_t pos=0;
     53     FW_IPC_ReceiveQue   *rcv_q;
     54 
     55     rcv_q = &fwipc->rcv_q[indx];
     56     /* count the cubby buffer which we already read if present */
     57     ret = (_dmem.stream_info[indx].buffered_data) ? CONFIG_IPC_MESSAGE_MAX_SIZE:0;
     58     ret += ipc_mq_read_avail(&rcv_q->mq, (int32_t *)&pos);
     59     return ret;
     60 }
     61 
     62 /*------------------------------------------------------------------------------
     63  * Function:  mfd_round_robin
     64  * Params:
     65  *        [in]  pri: Priority of the stream
     66  *        [in] indx: stream id number of the last stream that was scheduled.
     67  *        [out] qnum: Stream id of priority(pri) which has data.
     68  * This function is responsible for figuring out which stream needs to be scheduled next.
     69  * It starts after the last scheduled stream and walks through all streams until it finds
     70  * a stream which is of the required priority, is in the running state, has space on its
     71  * output queue and data on its input queue.
     72  * If no such stream is found qnum is not updated and the return value is 0.
     73  * If a stream is found then qnum is updated with that id and the function returns 1.
     74  *------------------------------------------------------------------------------
     75  */
     76 
     77 uint32_t mfd_round_robin(uint32_t pri, int32_t *qnum, int32_t indx)
     78 {
     79     FW_IPC_Handle *fwipc = GET_IPC_HANDLE(_dmem);
     80     int32_t i = CONFIG_IPC_FW_MAX_RX_QUEUES;
     81     uint32_t ret = 0;
     82     /* Go through all queues until we find a valid queue of the required priority */
     83     while(i>0)
     84     {
     85         indx++;
     86         if(indx >=  CONFIG_IPC_FW_MAX_RX_QUEUES) indx = 0;
     87 
     88         /* We should look only at queues which match the priority and
     89            are in the running state */
     90         if( (_dmem.stream_info[indx].state == 1)
     91             && (_dmem.stream_info[indx].priority == pri))
     92         {
     93             uint32_t inpt_avail=0, output_avail=0, wklds_avail =0 , pos;
     94             FW_IPC_ReceiveQue   *rcv_q;
     95             rcv_q = &fwipc->rcv_q[indx];
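                    /* A stream counts as having input if it still holds a buffered (cubby) ES descriptor
                       or if its receive queue has data */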
     96             inpt_avail = (_dmem.stream_info[indx].buffered_data > 0) || (ipc_mq_read_avail(&rcv_q->mq, (int32_t *)&pos) > 0);
     97             /* we have to check for two workloads to protect against error cases where we might have to push both current and next workloads */
     98             output_avail = FwIPC_SpaceAvailForMessage(fwipc, &fwipc->snd_q[indx], CONFIG_IPC_MESSAGE_MAX_SIZE, &pos) >= 2;
     99             pos = 0;
    100             /* Need at least current and next to proceed */
    101             wklds_avail =  (ipc_mq_read_avail(&fwipc->wkld_q[indx].mq, (int32_t *)&pos) >= (CONFIG_IPC_MESSAGE_MAX_SIZE << 1));
    102             if(inpt_avail && output_avail && wklds_avail)
    103             {/* Success condition: we have some data on input and enough space on output queue */
    104                 *qnum = indx;
    105                 ret =1;
    106                 break;
    107             }
    108         }
    109         i--;
    110     }
    111     return ret;
    112 }
    113 static inline void mfd_setup_emitter(FW_IPC_Handle *fwipc, FW_IPC_ReceiveQue *rcv_q, mfd_pk_strm_cxt *cxt)
    114 {
    115     int32_t ret1=0,ret=0;
    116     /* We don't check return values for the peek as round robin guarantees that we have the required free workloads */
    117     ret = FwIPC_PeekReadMessage(fwipc, rcv_q, (char *)&(cxt->wkld1), sizeof(ipc_msg_data), 0);
    118     ret1 = FwIPC_PeekReadMessage(fwipc, rcv_q, (char *)&(cxt->wkld2), sizeof(ipc_msg_data), 1);
    119     viddec_emit_update(&(cxt->pm.emitter), cxt->wkld1.phys, cxt->wkld2.phys, cxt->wkld1.len, cxt->wkld2.len);
    120 }
    121 
    122 static inline void mfd_init_swap_memory(viddec_pm_cxt_t *pm, uint32_t codec_type, uint32_t start_addr, uint32_t clean)
    123 {
    124     uint32_t *persist_mem;
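            /* ORing in GV_DDR_MEM_MASK presumably maps the persistent-memory address into the
               DDR address space seen by the parser (assumption based on the mask name) */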
    125     persist_mem = (uint32_t *)(start_addr | GV_DDR_MEM_MASK);
    126     viddec_pm_init_context(pm,codec_type, persist_mem, clean);
    127     pm->sc_prefix_info.first_sc_detect = 1;
    128     viddec_emit_init(&(pm->emitter));
    129 }
    130 
    131 void output_omar_wires( unsigned int value )
    132 {
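            /* Debug hook: in RTL simulation the value is written to the host doorbell register so it can be
               captured on the OMAR wires (see the "bits captured by OMAR" usage below); otherwise it is a no-op */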
    133 #ifdef RTL_SIMULATION
    134     reg_write(CONFIG_IPC_ROFF_HOST_DOORBELL, value );
    135 #endif
    136 }
    137 
    138 /*------------------------------------------------------------------------------
    139  * Function:  viddec_fw_init_swap_memory
    140  * This function is responsible for setting the swap memory to a good state for the current stream.
    141  * The swap parameter tells us whether we need to dma the context to local memory.
    142  * We call init on the emitter and the parser manager, which in turn calls the init of the codec we are opening the stream for.
    143  *------------------------------------------------------------------------------
    144  */
    145 
    146 void viddec_fw_init_swap_memory(unsigned int stream_id, unsigned int swap, unsigned int clean)
    147 {
    148     mfd_pk_strm_cxt *cxt;
    149     mfd_stream_info *cxt_swap;
    150     cxt = (mfd_pk_strm_cxt *)&(_dmem.srm_cxt);
    151     cxt_swap = (mfd_stream_info *)&(_dmem.stream_info[stream_id]);
    152 
    153     if(swap)
    154     {/* Swap context into local memory */
    155         cp_using_dma(cxt_swap->ddr_cxt, (uint32_t) &(cxt->pm), sizeof(viddec_pm_cxt_t), false, false);
    156     }
    157 
    158     {
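                /* The codec's persistent memory is assumed to live in DDR immediately after the
                   parser manager context (ddr_cxt + cxt_size) */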
    159         mfd_init_swap_memory(&(cxt->pm), cxt_swap->strm_type, cxt_swap->ddr_cxt+cxt_swap->cxt_size, clean);
    160         cxt_swap->wl_time = 0;
    161         cxt_swap->es_time = 0;
    162     }
    163     if(swap)
    164     {/* Swap context into DDR */
    165         cp_using_dma(cxt_swap->ddr_cxt, (uint32_t) &(cxt->pm), sizeof(viddec_pm_cxt_t), true, false);
    166     }
    167 }
    168 
    169 /*------------------------------------------------------------------------------
    170  * Function: viddec_fw_push_current_frame_to_output
    171  * This is a helper function to read a workload from the workload queue and push it to the output queue.
    172  * This is called when we are done with a frame.
    173  *------------------------------------------------------------------------------
    174  */
    175 static inline void viddec_fw_push_current_frame_to_output(FW_IPC_Handle *fwipc, uint32_t cur)
    176 {
    177     ipc_msg_data wkld_to_push;
    178     FwIPC_ReadMessage(fwipc, &fwipc->wkld_q[cur], (char *)&(wkld_to_push), sizeof(ipc_msg_data));
    179     FwIPC_SendMessage(fwipc, cur, (char *)&(wkld_to_push),  sizeof(ipc_msg_data));
    180 }
    181 
    182 /*------------------------------------------------------------------------------
    183  * Function: viddec_fw_get_next_stream_to_schedule
    184  * This is a helper function to figure out which active stream needs to be scheduled next.
    185  * If no stream can be scheduled it returns -1.
    186  *------------------------------------------------------------------------------
    187  */
    188 static inline int viddec_fw_get_next_stream_to_schedule(void)
    189 {
    190     int32_t cur = -1;
    191 
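            /* Realtime streams get first chance; the search resumes after the last stream of that
               priority that was scheduled, so equal-priority streams get equal time slices */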
    192     if(mfd_round_robin(viddec_stream_priority_REALTIME, &cur, _dmem.g_pk_data.high_id))
    193     {
    194         /* On success store the stream id */
    195         _dmem.g_pk_data.high_id = cur;
    196     }
    197     else
    198     {
    199         /* Check low priority queues since we couldn't find a valid realtime stream */
    200         if(mfd_round_robin(viddec_stream_priority_BACKGROUND, &cur, _dmem.g_pk_data.low_id))
    201         {
    202             _dmem.g_pk_data.low_id = cur;
    203         }
    204     }
    205 
    206     return cur;
    207 }
    208 
    209 /*------------------------------------------------------------------------------
    210  * Function: viddec_fw_update_pending_interrupt_flag
    211  * This is a helper function to figure out if we need to mark an interrupt pending for this stream.
    212  * We update the status value here if we find any of the interrupt conditions are true.
    213  * If this stream has an interrupt pending which we could not send to the host, we don't overwrite the past status info.
    214  *------------------------------------------------------------------------------
    215  */
    216 static inline void viddec_fw_update_pending_interrupt_flag(int32_t cur, mfd_stream_info *cxt_swap, uint8_t pushed_a_workload,
    217                                                            uint32_t es_Q_data_at_start)
    218 {
    219     if(_dmem.int_status[cur].mask)
    220     {
    221         if(!cxt_swap->pending_interrupt)
    222         {
    223             uint32_t es_Q_data_now;
    224             uint8_t wmark_boundary_reached=false;
    225             es_Q_data_now = viddec_fw_get_total_input_Q_data((uint32_t)cur);
    226             wmark_boundary_reached = viddec_fw_check_watermark_boundary(es_Q_data_at_start, es_Q_data_now, cxt_swap->low_watermark);
    227             _dmem.int_status[cur].status = 0;
    228             if(pushed_a_workload)
    229             {
    230                 _dmem.int_status[cur].status |= VIDDEC_FW_WKLD_DATA_AVAIL;
    231             }
    232             if(wmark_boundary_reached)
    233             {
    234                 _dmem.int_status[cur].status |= VIDDEC_FW_INPUT_WATERMARK_REACHED;
    235             }
    236             cxt_swap->pending_interrupt = ( _dmem.int_status[cur].status != 0);
    237         }
    238     }
    239     else
    240     {
    241         cxt_swap->pending_interrupt = false;
    242     }
    243 }
    244 
    245 static inline void viddec_fw_handle_error_and_inband_messages(int32_t cur, uint32_t pm_ret)
    246 {
    247     FW_IPC_Handle *fwipc = GET_IPC_HANDLE(_dmem);
    248 
    249     viddec_fw_push_current_frame_to_output(fwipc, cur);
    250     switch(pm_ret)
    251     {
    252         case PM_EOS:
    253         case PM_OVERFLOW:
    254         {
    255             viddec_fw_init_swap_memory(cur, false, true);
    256         }
    257         break;
    258         case PM_DISCONTINUITY:
    259         {
    260             viddec_fw_init_swap_memory(cur, false, false);
    261         }
    262         break;
    263         default:
    264             break;
    265     }
    266 }
    267 
    268 void viddec_fw_debug_scheduled_stream_state(int32_t indx, int32_t start)
    269 {
    270     FW_IPC_Handle *fwipc = GET_IPC_HANDLE(_dmem);
    271     uint32_t inpt_avail=0, output_avail=0, wklds_avail =0 , pos;
    272     FW_IPC_ReceiveQue   *rcv_q;
    273     uint32_t message;
    274 
    275     message = (start) ? SVEN_MODULE_EVENT_GV_FW_PK_SCHDL_STRM_START: SVEN_MODULE_EVENT_GV_FW_PK_SCHDL_STRM_END;
    276     rcv_q = &fwipc->rcv_q[indx];
    277     inpt_avail = ipc_mq_read_avail(&rcv_q->mq, (int32_t *)&pos);
    278     inpt_avail += ((_dmem.stream_info[indx].buffered_data > 0) ? CONFIG_IPC_MESSAGE_MAX_SIZE: 0);
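            /* Scale the byte counts down (>> 4) before reporting, presumably so the values stay compact
               in the SVEN debug payload fields (assumption) */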
    279     inpt_avail = inpt_avail >> 4;
    280     pos = 0;
    281     output_avail = ipc_mq_read_avail(&fwipc->snd_q[indx].mq, (int32_t *)&pos);
    282     output_avail = output_avail >> 4;
    283     pos = 0;
    284     wklds_avail =  ipc_mq_read_avail(&fwipc->wkld_q[indx].mq, (int32_t *)&pos);
    285     wklds_avail = wklds_avail >> 4;
    286     WRITE_SVEN(message, (int)indx, (int)inpt_avail, (int)output_avail,
    287                (int)wklds_avail, 0, 0);
    288 }
    289 
    290 /*------------------------------------------------------------------------------
    291  * Function:  viddec_fw_process_async_queues (a.k.a. the Parser Kernel)
    292  * This function is responsible for handling the asynchronous queues.
    293  *
    294  * The first step is to figure out which stream to run. The current algorithm
    295  * will go through all high priority queues for a valid stream; if none is found we
    296  * go through the lower priority queues.
    297  *
    298  * If a valid stream is found we swap the required context from DDR to DMEM and do everything
    299  * necessary to set up the stream.
    300  * Once a stream is set up we call the parser manager and wait until a workload is created or no more
    301  * input data is left.
    302  * Once we find a workload we push it to the host and save the current context to DDR.
    303  *------------------------------------------------------------------------------
    304  */
    305 
    306 static inline int32_t viddec_fw_process_async_queues()
    307 {
    308     int32_t cur = -1;
    309 
    310     cur = viddec_fw_get_next_stream_to_schedule();
    311 
    312     if(cur != -1)
    313     {
    314         FW_IPC_Handle *fwipc = GET_IPC_HANDLE(_dmem);
    315         FW_IPC_ReceiveQue   *rcv_q;
    316         /* bits captured by OMAR */
    317         output_omar_wires( 0x0 );
    318         rcv_q = &fwipc->rcv_q[cur];
    319         {
    320             mfd_pk_strm_cxt *cxt;
    321             mfd_stream_info *cxt_swap;
    322             cxt = (mfd_pk_strm_cxt *)&(_dmem.srm_cxt);
    323             cxt_swap = (mfd_stream_info *)&(_dmem.stream_info[cur]);
    324 
    325             /* Step 1: Swap rodata to local memory. Not doing this currently as all the rodata fits in local memory. */
    326             {/* Step 2: Swap context into local memory */
    327                 cp_using_dma(cxt_swap->ddr_cxt, (uint32_t) &(cxt->pm), sizeof(viddec_pm_cxt_t), false, false);
    328             }
    329             /* Step 3: setup the emitter by peeking the next two workloads and initialising it */
    330             mfd_setup_emitter(fwipc, &fwipc->wkld_q[cur], cxt);
    331             viddec_fw_debug_scheduled_stream_state(cur, true);
    332             /* Step 4: Call Parser Manager until workload done or No more ES buffers */
    333             {
    334                 ipc_msg_data *data = 0;
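                        /* A NULL data pointer tells the parser manager to keep working on the ES buffer it
                           already has; a non-NULL pointer hands it a new buffer (see the PM_NO_DATA case) */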
    335                 uint8_t stream_active = true, pushed_a_workload=false;
    336                 uint32_t pm_ret = PM_SUCCESS, es_Q_data_at_start;
    337                 uint32_t start_time, time=0;
    338 
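                        /* Arm the watchdog and record the starting tick; wl_time and es_time accumulate
                           workload and ES-buffer parse times for the SVEN performance messages below */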
    339                 start_time = set_wdog(VIDDEC_WATCHDOG_COUNTER_MAX);
    340                 timer=0;
    341                 es_Q_data_at_start = viddec_fw_get_total_input_Q_data((uint32_t)cur);
    342                 do
    343                 {
    344                     output_omar_wires( 0x1 );
    345                     {
    346                         uint32_t es_t0,es_t1;
    347                         get_wdog(&es_t0);
    348                         pm_ret = viddec_pm_parse_es_buffer(&(cxt->pm), cxt_swap->strm_type, data);
    349                         get_wdog(&es_t1);
    350                         cxt_swap->es_time += get_total_ticks(es_t0, es_t1);
    351                     }
    352                     switch(pm_ret)
    353                     {
    354                         case PM_EOS:
    355                         case PM_WKLD_DONE:
    356                         case PM_OVERFLOW:
    357                         case PM_DISCONTINUITY:
    358                         {/* Finished a frame worth of data or encountered fatal error*/
    359                             stream_active = false;
    360                         }
    361                         break;
    362                         case PM_NO_DATA:
    363                         {
    364                             uint32_t next_ret=0;
    365                             if ( (NULL != data) && (0 != cxt_swap->es_time) )
    366                             {
    367                                 /* print performance info for this buffer */
    368                                 WRITE_SVEN(SVEN_MODULE_EVENT_GV_FW_PK_ES_DONE, (int)cur, (int)cxt_swap->es_time, (int)cxt->input.phys,
    369                                            (int)cxt->input.len, (int)cxt->input.id, (int)cxt->input.flags );
    370                                 cxt_swap->es_time = 0;
    371                             }
    372 
    373                             next_ret = FwIPC_ReadMessage(fwipc, rcv_q, (char *)&(cxt->input), sizeof(ipc_msg_data));
    374                             if(next_ret != 0)
    375                             {
    376                                 data = &(cxt->input);
    377                                 WRITE_SVEN(SVEN_MODULE_EVENT_GV_FW_PK_ES_START, (int)cur, (int)cxt_swap->wl_time,
    378                                            (int)cxt->input.phys, (int)cxt->input.len, (int)cxt->input.id, (int)cxt->input.flags );
    379                             }
    380                             else
    381                             {/* No data on input queue */
    382                                 cxt_swap->buffered_data = 0;
    383                                 stream_active = false;
    384                             }
    385                         }
    386                         break;
    387                         default:
    388                         {/* Not done with current buffer */
    389                             data = NULL;
    390                         }
    391                         break;
    392                     }
    393                 }while(stream_active);
    394                 get_wdog(&time);
    395                 cxt_swap->wl_time += get_total_ticks(start_time, time);
    396                 /* Step 5: If workload done push workload out */
    397                 switch(pm_ret)
    398                 {
    399                     case PM_EOS:
    400                     case PM_WKLD_DONE:
    401                     case PM_OVERFLOW:
    402                     case PM_DISCONTINUITY:
    403                     {/* Push current workload as we are done with the frame */
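                                /* On PM_WKLD_DONE the current ES buffer may still hold data for the next frame,
                                   so mark it as buffered; for EOS/overflow/discontinuity it is dropped
                                   (assumption based on how buffered_data is counted in the input-Q helpers) */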
    404                         cxt_swap->buffered_data = (PM_WKLD_DONE == pm_ret) ? true: false;
    405                         viddec_pm_update_time(&(cxt->pm), cxt_swap->wl_time);
    406 
    407                         /* xmit performance info for this workload output */
    408                         WRITE_SVEN( SVEN_MODULE_EVENT_GV_FW_PK_WL_DONE, (int)cur, (int)cxt_swap->wl_time, (int)cxt->wkld1.phys,
    409                                     (int)cxt->wkld1.len, (int)cxt->wkld1.id, (int)cxt->wkld1.flags );
    410                         cxt_swap->wl_time = 0;
    411 
    412                         viddec_fw_push_current_frame_to_output(fwipc, cur);
    413                         if(pm_ret != PM_WKLD_DONE)
    414                         {
    415                             viddec_fw_handle_error_and_inband_messages(cur, pm_ret);
    416                         }
    417                         pushed_a_workload = true;
    418                     }
    419                     break;
    420                     default:
    421                         break;
    422                 }
    423                 /* Update information on whether we have active interrupt for this stream */
    424                 viddec_fw_update_pending_interrupt_flag(cur, cxt_swap, pushed_a_workload, es_Q_data_at_start);
    425             }
    426             viddec_fw_debug_scheduled_stream_state(cur, false);
    427             /* Step 6: swap context into DDR */
    428             {
    429                 cp_using_dma(cxt_swap->ddr_cxt, (uint32_t) &(cxt->pm), sizeof(viddec_pm_cxt_t), true, false);
    430             }
    431         }
    432 
    433     }
    434     return cur;
    435 }
    436 
    437 
    438 /*------------------------------------------------------------------------------
    439  * Function:  process_command
    440  * This magic function figures out which function to execute based on the auto api.
    441  *------------------------------------------------------------------------------
    442  */
    443 
    444 static inline void process_command(uint32_t cmd_id, unsigned char *command)
    445 {
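            /* The auto api command id appears to encode the api group in the top byte (offset by 13,
               presumably the id of the first group) and the function index in the lower 24 bits */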
    446     int32_t groupid = ((cmd_id >> 24) - 13) & 0xff;
    447     int32_t funcid = cmd_id & 0xffffff;
    448     /* writing func pointer to host doorbell */
    449     output_omar_wires( (int) viddec_fw_api_array[groupid].unmarshal[funcid] );
    450     WRITE_SVEN( SVEN_MODULE_EVENT_GV_FW_AUTOAPI_CMD,(int) cmd_id, (int) command, ((int *)command)[0],
    451                 ((int *)command)[1], ((int *)command)[2], ((int *)command)[3] );
    452 
    453     viddec_fw_api_array[groupid].unmarshal[funcid](0, command);
    454 
    455 }
    456 
    457 /*------------------------------------------------------------------------------
    458  * Function:  viddec_fw_process_sync_queues (a.k.a. the auto api)
    459  * Params:
    460  *       [in] msg: common sync structure where all required parameters are present for autoapi.
    461  *
    462  * This function is responsible for handling synchronous messages. All synchronous messages
    463  * are handled through auto api.
    464  * What are synchronous messages? Anything related to teardown or opening a stream, e.g. open, close, flush etc.
    465  *
    466  * Only one synchronous message is handled at a time. When a synchronous message arrives its id is placed in the cp doorbell.
    467  * Once we are done handling the synchronous message through the auto api we release the doorbell to let the host write the
    468  * next message.
    469  *------------------------------------------------------------------------------
    470  */
    471 
    472 static inline int32_t viddec_fw_process_sync_queues(unsigned char *msg)
    473 {
    474     int32_t ret = -1;
    475 
    476     if(0 == reg_read(CONFIG_IPC_ROFF_RISC_DOORBELL_STATUS))
    477     {
    478         uint32_t command1=0;
    479         command1 = reg_read(CONFIG_IPC_ROFF_RISC_RX_DOORBELL);
    480         process_command(command1, msg);
    481         reg_write(CONFIG_IPC_ROFF_RISC_DOORBELL_STATUS, 0x2); /* Inform Host we are done with this message */
    482         ret = 0;
    483     }
    484     return ret;
    485 }
    486 
    487 /*------------------------------------------------------------------------------
    488  * Function:  viddec_fw_check_for_pending_int
    489  * This function walks through all active streams to see if at least one stream has a pending interrupt
    490  * and returns true if it finds one.
    491  *------------------------------------------------------------------------------
    492  */
    493 static inline uint32_t viddec_fw_check_for_pending_int(void)
    494 {
    495     uint32_t i=0, ret=false;
    496     /* start from 0 to max streams that fw can handle*/
    497     while(i < FW_SUPPORTED_STREAMS)
    498     {
    499         if(_dmem.stream_info[i].state == 1)
    500         {
    501             if((_dmem.stream_info[i].pending_interrupt) && _dmem.int_status[i].mask)
    502             {
    503                 ret = true;
    504             }
    505             else
    506             {/* If this is not in INT state clear the status before sending it to host */
    507                 _dmem.int_status[i].status = 0;
    508             }
    509         }
    510         i++;
    511     }
    512     return ret;
    513 }
    514 
    515 /*------------------------------------------------------------------------------
    516  * Function:  viddec_fw_clear_processed_int
    517  * This function walks through all active streams to clear the pending interrupt state. This is
    518  * called after an INT was issued.
    519  *------------------------------------------------------------------------------
    520  */
    521 static inline void viddec_fw_clear_processed_int(void)
    522 {
    523     uint32_t i=0;
    524     /* start from 0 to max streams that fw can handle*/
    525     while(i < FW_SUPPORTED_STREAMS)
    526     {
    527         //if(_dmem.stream_info[i].state == 1)
    528         _dmem.stream_info[i].pending_interrupt = false;
    529         i++;
    530     }
    531     return;
    532 }
    533 
    534 /*------------------------------------------------------------------------------
    535  * Function:  viddec_fw_int_host
    536  * This function interrupts the host if data is available for the host or if any other status
    537  * the host configured the FW to report is valid.
    538  * There is only one interrupt line so this is a shared INT for all streams; the host should
    539  * look at the status of all streams when it receives an INT.
    540  * The FW will interrupt the host only if the host doorbell is free; in other words the host
    541  * should always make the doorbell free at the end of its ISR.
    542  *------------------------------------------------------------------------------
    543  */
    544 
    545 static inline int32_t viddec_fw_int_host()
    546 {
    547     /* We interrupt the host only if the host is ready to receive an interrupt */
    548     if((reg_read(CONFIG_IPC_ROFF_HOST_DOORBELL_STATUS) & GV_DOORBELL_STATS) == GV_DOORBELL_STATS)
    549     {
    550         if(viddec_fw_check_for_pending_int())
    551         {
    552             /* If a pending interrupt is found trigger INT */
    553             reg_write(CONFIG_IPC_ROFF_HOST_DOORBELL, VIDDEC_FW_PARSER_IPC_HOST_INT);
    554             /* Clear all streams' pending interrupt info since we use a global INT for all streams */
    555             viddec_fw_clear_processed_int();
    556         }
    557     }
    558     return 1;
    559 }
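        /* Stack-overflow sentinel: placed in its own section (presumably located just past the stack) and
           seeded with 0xDEADBEEF in main(); if the pattern ever changes the stack has overflowed
           (the check in the main loop below is currently compiled out) */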
    560 volatile unsigned int stack_corrupted __attribute__ ((section (".stckovrflwchk")));
    561 /*------------------------------------------------------------------------------
    562  * Function:  main
    563  * This function is the main firmware function. It is an infinite loop where it polls
    564  * for messages and processes them if they are available. Currently we ping pong between
    565  * synchronous and asynchronous messages one at a time. If we have multiple asynchronous
    566  * queues we always process only one between synchronous messages.
    567  *
    568  * For multiple asynchronous queues we round robin through the high priorities first and pick
    569  * the first one available. The next time we come around for an asynchronous message we start
    570  * from the next stream onwards, so this guarantees that we give equal time slices to same
    571  * priority queues. If no high priority queues are active we go to low priority queues and repeat
    572  * the same process.
    573  *------------------------------------------------------------------------------
    574  */
    575 
    576 int main(void)
    577 {
    578     unsigned char *msg = (uint8_t *)&(_dmem.buf.data[0]);
    579 
    580     /* We wait until host reads sync message */
    581     reg_write(CONFIG_IPC_ROFF_HOST_RX_DOORBELL, GV_FW_IPC_HOST_SYNC);
    582 
    583     while ( GV_DOORBELL_STATS != reg_read(CONFIG_IPC_ROFF_HOST_DOORBELL_STATUS) )
    584     { /*poll register until done bit is set */
    585       /* Host re-writes Vsparc DRAM (BSS) in this loop and will hit the DONE bit when complete */
    586     }
    587     enable_intr();
    588     /* Initialize State for queues */
    589     viddec_fw_parser_register_callbacks();
    590     FwIPC_Initialize(GET_IPC_HANDLE(_dmem), (volatile char *)msg);
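            /* -1 means no stream has been scheduled yet; mfd_round_robin increments before wrapping,
               so the first search starts at stream 0 */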
    591     _dmem.g_pk_data.high_id = _dmem.g_pk_data.low_id = -1;
    592     viddec_pm_init_ops();
    593     stack_corrupted = 0xDEADBEEF;
    594     while(1)
    595     {
    596         viddec_fw_process_sync_queues(msg);
    597         viddec_fw_process_async_queues();
    598         viddec_fw_int_host();
    599 #if 0
    600         if(stack_corrupted != 0xDEADBEEF)
    601         {
    602             WRITE_SVEN(SVEN_MODULE_EVENT_GV_FW_FATAL_STACK_CORRPON, 0, 0, 0, 0, 0, 0);
    603             while(1);
    604         }
    605 #endif
    606     }
    607     return 1;
    608 }
    609