pf-ff-mac-scheduler.cc
1 /* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
2 /*
3  * Copyright (c) 2011 Centre Tecnologic de Telecomunicacions de Catalunya (CTTC)
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation;
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17  *
18  * Author: Marco Miozzo <marco.miozzo@cttc.es>
19  */
20 
21 #include <ns3/log.h>
22 #include <ns3/pointer.h>
23 #include <ns3/math.h>
24 
25 #include <ns3/simulator.h>
26 #include <ns3/lte-amc.h>
27 #include <ns3/pf-ff-mac-scheduler.h>
28 #include <ns3/lte-vendor-specific-parameters.h>
29 #include <ns3/boolean.h>
30 #include <set>
31 
32 NS_LOG_COMPONENT_DEFINE ("PfFfMacScheduler");
33 
34 namespace ns3 {
35 
36 int PfType0AllocationRbg[4] = {
37  10, // RBG size 1
38  26, // RBG size 2
39  63, // RBG size 3
40  110 // RBG size 4
41 }; // see table 7.1.6.1-1 of 36.213
42 
43 
44 NS_OBJECT_ENSURE_REGISTERED (PfFfMacScheduler);
45 
46 
47 
48 class PfSchedulerMemberCschedSapProvider : public FfMacCschedSapProvider
49 {
50 public:
51  PfSchedulerMemberCschedSapProvider (PfFfMacScheduler* scheduler);
52 
53  // inherited from FfMacCschedSapProvider
54  virtual void CschedCellConfigReq (const struct CschedCellConfigReqParameters& params);
55  virtual void CschedUeConfigReq (const struct CschedUeConfigReqParameters& params);
56  virtual void CschedLcConfigReq (const struct CschedLcConfigReqParameters& params);
57  virtual void CschedLcReleaseReq (const struct CschedLcReleaseReqParameters& params);
58  virtual void CschedUeReleaseReq (const struct CschedUeReleaseReqParameters& params);
59 
60 private:
61  PfSchedulerMemberCschedSapProvider ();
62  PfFfMacScheduler* m_scheduler;
63 };
64 
65 PfSchedulerMemberCschedSapProvider::PfSchedulerMemberCschedSapProvider ()
66 {
67 }
68 
69 PfSchedulerMemberCschedSapProvider::PfSchedulerMemberCschedSapProvider (PfFfMacScheduler* scheduler) : m_scheduler (scheduler)
70 {
71 }
72 
73 
74 void
75 PfSchedulerMemberCschedSapProvider::CschedCellConfigReq (const struct CschedCellConfigReqParameters& params)
76 {
77  m_scheduler->DoCschedCellConfigReq (params);
78 }
79 
80 void
81 PfSchedulerMemberCschedSapProvider::CschedUeConfigReq (const struct CschedUeConfigReqParameters& params)
82 {
83  m_scheduler->DoCschedUeConfigReq (params);
84 }
85 
86 
87 void
88 PfSchedulerMemberCschedSapProvider::CschedLcConfigReq (const struct CschedLcConfigReqParameters& params)
89 {
90  m_scheduler->DoCschedLcConfigReq (params);
91 }
92 
93 void
94 PfSchedulerMemberCschedSapProvider::CschedLcReleaseReq (const struct CschedLcReleaseReqParameters& params)
95 {
96  m_scheduler->DoCschedLcReleaseReq (params);
97 }
98 
99 void
100 PfSchedulerMemberCschedSapProvider::CschedUeReleaseReq (const struct CschedUeReleaseReqParameters& params)
101 {
102  m_scheduler->DoCschedUeReleaseReq (params);
103 }
104 
105 
106 
107 
108 class PfSchedulerMemberSchedSapProvider : public FfMacSchedSapProvider
109 {
110 public:
111  PfSchedulerMemberSchedSapProvider (PfFfMacScheduler* scheduler);
112 
113  // inherited from FfMacSchedSapProvider
114  virtual void SchedDlRlcBufferReq (const struct SchedDlRlcBufferReqParameters& params);
115  virtual void SchedDlPagingBufferReq (const struct SchedDlPagingBufferReqParameters& params);
116  virtual void SchedDlMacBufferReq (const struct SchedDlMacBufferReqParameters& params);
117  virtual void SchedDlTriggerReq (const struct SchedDlTriggerReqParameters& params);
118  virtual void SchedDlRachInfoReq (const struct SchedDlRachInfoReqParameters& params);
119  virtual void SchedDlCqiInfoReq (const struct SchedDlCqiInfoReqParameters& params);
120  virtual void SchedUlTriggerReq (const struct SchedUlTriggerReqParameters& params);
121  virtual void SchedUlNoiseInterferenceReq (const struct SchedUlNoiseInterferenceReqParameters& params);
122  virtual void SchedUlSrInfoReq (const struct SchedUlSrInfoReqParameters& params);
123  virtual void SchedUlMacCtrlInfoReq (const struct SchedUlMacCtrlInfoReqParameters& params);
124  virtual void SchedUlCqiInfoReq (const struct SchedUlCqiInfoReqParameters& params);
125 
126 
127 private:
128  PfSchedulerMemberSchedSapProvider ();
129  PfFfMacScheduler* m_scheduler;
130 };
131 
132 
133 
134 PfSchedulerMemberSchedSapProvider::PfSchedulerMemberSchedSapProvider ()
135 {
136 }
137 
138 
139 PfSchedulerMemberSchedSapProvider::PfSchedulerMemberSchedSapProvider (PfFfMacScheduler* scheduler)
140  : m_scheduler (scheduler)
141 {
142 }
143 
144 void
145 PfSchedulerMemberSchedSapProvider::SchedDlRlcBufferReq (const struct SchedDlRlcBufferReqParameters& params)
146 {
147  m_scheduler->DoSchedDlRlcBufferReq (params);
148 }
149 
150 void
151 PfSchedulerMemberSchedSapProvider::SchedDlPagingBufferReq (const struct SchedDlPagingBufferReqParameters& params)
152 {
153  m_scheduler->DoSchedDlPagingBufferReq (params);
154 }
155 
156 void
157 PfSchedulerMemberSchedSapProvider::SchedDlMacBufferReq (const struct SchedDlMacBufferReqParameters& params)
158 {
159  m_scheduler->DoSchedDlMacBufferReq (params);
160 }
161 
162 void
163 PfSchedulerMemberSchedSapProvider::SchedDlTriggerReq (const struct SchedDlTriggerReqParameters& params)
164 {
165  m_scheduler->DoSchedDlTriggerReq (params);
166 }
167 
168 void
169 PfSchedulerMemberSchedSapProvider::SchedDlRachInfoReq (const struct SchedDlRachInfoReqParameters& params)
170 {
171  m_scheduler->DoSchedDlRachInfoReq (params);
172 }
173 
174 void
175 PfSchedulerMemberSchedSapProvider::SchedDlCqiInfoReq (const struct SchedDlCqiInfoReqParameters& params)
176 {
177  m_scheduler->DoSchedDlCqiInfoReq (params);
178 }
179 
180 void
181 PfSchedulerMemberSchedSapProvider::SchedUlTriggerReq (const struct SchedUlTriggerReqParameters& params)
182 {
183  m_scheduler->DoSchedUlTriggerReq (params);
184 }
185 
186 void
187 PfSchedulerMemberSchedSapProvider::SchedUlNoiseInterferenceReq (const struct SchedUlNoiseInterferenceReqParameters& params)
188 {
189  m_scheduler->DoSchedUlNoiseInterferenceReq (params);
190 }
191 
192 void
193 PfSchedulerMemberSchedSapProvider::SchedUlSrInfoReq (const struct SchedUlSrInfoReqParameters& params)
194 {
195  m_scheduler->DoSchedUlSrInfoReq (params);
196 }
197 
198 void
199 PfSchedulerMemberSchedSapProvider::SchedUlMacCtrlInfoReq (const struct SchedUlMacCtrlInfoReqParameters& params)
200 {
201  m_scheduler->DoSchedUlMacCtrlInfoReq (params);
202 }
203 
204 void
205 PfSchedulerMemberSchedSapProvider::SchedUlCqiInfoReq (const struct SchedUlCqiInfoReqParameters& params)
206 {
207  m_scheduler->DoSchedUlCqiInfoReq (params);
208 }
209 
210 
211 
212 
213 
214 PfFfMacScheduler::PfFfMacScheduler ()
215  : m_cschedSapUser (0),
216  m_schedSapUser (0),
217  m_timeWindow (99.0),
218  m_nextRntiUl (0)
219 {
220  m_amc = CreateObject <LteAmc> ();
221  m_cschedSapProvider = new PfSchedulerMemberCschedSapProvider (this);
222  m_schedSapProvider = new PfSchedulerMemberSchedSapProvider (this);
223 }
224 
225 PfFfMacScheduler::~PfFfMacScheduler ()
226 {
227  NS_LOG_FUNCTION (this);
228 }
229 
230 void
231 PfFfMacScheduler::DoDispose ()
232 {
233  NS_LOG_FUNCTION (this);
234  m_dlHarqProcessesDciBuffer.clear ();
235  m_dlHarqProcessesTimer.clear ();
236  m_dlHarqProcessesRlcPduListBuffer.clear ();
237  m_dlInfoListBuffered.clear ();
238  m_ulHarqCurrentProcessId.clear ();
239  m_ulHarqProcessesStatus.clear ();
240  m_ulHarqProcessesDciBuffer.clear ();
241  delete m_cschedSapProvider;
242  delete m_schedSapProvider;
243 }
244 
245 TypeId
246 PfFfMacScheduler::GetTypeId (void)
247 {
248  static TypeId tid = TypeId ("ns3::PfFfMacScheduler")
249  .SetParent<FfMacScheduler> ()
250  .AddConstructor<PfFfMacScheduler> ()
251  .AddAttribute ("CqiTimerThreshold",
252  "The number of TTIs a CQI is valid (default 1000 - 1 sec.)",
253  UintegerValue (1000),
254  MakeUintegerAccessor (&PfFfMacScheduler::m_cqiTimersThreshold),
255  MakeUintegerChecker<uint32_t> ())
256  .AddAttribute ("HarqEnabled",
257  "Activate/Deactivate the HARQ [by default is active].",
258  BooleanValue (true),
259  MakeBooleanAccessor (&PfFfMacScheduler::m_harqOn),
260  MakeBooleanChecker ())
261  .AddAttribute ("UlGrantMcs",
262  "The MCS of the UL grant, must be [0..15] (default 0)",
263  UintegerValue (0),
264  MakeUintegerAccessor (&PfFfMacScheduler::m_ulGrantMcs),
265  MakeUintegerChecker<uint8_t> ())
266  ;
267  return tid;
268 }
269 
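The three attributes registered above are set through the standard ns-3 attribute system. A minimal usage sketch, assuming a typical LteHelper-based simulation script (Config, LteHelper and the main() scaffolding are not part of this file; the values shown are the defaults):

#include <ns3/core-module.h>
#include <ns3/lte-module.h>

using namespace ns3;

int
main (int argc, char *argv[])
{
  // Tune the scheduler attributes before any eNB device is created.
  Config::SetDefault ("ns3::PfFfMacScheduler::CqiTimerThreshold", UintegerValue (1000));
  Config::SetDefault ("ns3::PfFfMacScheduler::HarqEnabled", BooleanValue (true));
  Config::SetDefault ("ns3::PfFfMacScheduler::UlGrantMcs", UintegerValue (0));

  // Ask the LTE helper to instantiate this scheduler on every eNB it builds.
  Ptr<LteHelper> lteHelper = CreateObject<LteHelper> ();
  lteHelper->SetSchedulerType ("ns3::PfFfMacScheduler");

  Simulator::Destroy ();
  return 0;
}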
270 
271 
272 void
273 PfFfMacScheduler::SetFfMacCschedSapUser (FfMacCschedSapUser* s)
274 {
275  m_cschedSapUser = s;
276 }
277 
278 void
279 PfFfMacScheduler::SetFfMacSchedSapUser (FfMacSchedSapUser* s)
280 {
281  m_schedSapUser = s;
282 }
283 
284 FfMacCschedSapProvider*
285 PfFfMacScheduler::GetFfMacCschedSapProvider ()
286 {
287  return m_cschedSapProvider;
288 }
289 
290 FfMacSchedSapProvider*
291 PfFfMacScheduler::GetFfMacSchedSapProvider ()
292 {
293  return m_schedSapProvider;
294 }
295 
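The four accessors above, together with the two member SAP-provider classes defined at the top of the file, implement the FemtoForum MAC Scheduler interface hand-shake: each provider simply forwards a primitive to the corresponding Do*Req method of the scheduler. A wiring sketch, assuming the connection is made against LteEnbMac the way LteHelper normally does it (this code is not part of this file):

#include <ns3/lte-module.h>

using namespace ns3;

void
ConnectSchedulerToEnbMac (Ptr<LteEnbMac> enbMac, Ptr<PfFfMacScheduler> sched)
{
  // scheduler -> MAC direction (indications and confirms)
  sched->SetFfMacSchedSapUser (enbMac->GetFfMacSchedSapUser ());
  sched->SetFfMacCschedSapUser (enbMac->GetFfMacCschedSapUser ());
  // MAC -> scheduler direction (requests)
  enbMac->SetFfMacSchedSapProvider (sched->GetFfMacSchedSapProvider ());
  enbMac->SetFfMacCschedSapProvider (sched->GetFfMacCschedSapProvider ());
}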
296 void
297 PfFfMacScheduler::DoCschedCellConfigReq (const struct FfMacCschedSapProvider::CschedCellConfigReqParameters& params)
298 {
299  NS_LOG_FUNCTION (this);
300  // Read the subset of parameters used
301  m_cschedCellConfig = params;
302  m_rachAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0);
303  FfMacCschedSapUser::CschedUeConfigCnfParameters cnf;
304  cnf.m_result = SUCCESS;
305  m_cschedSapUser->CschedUeConfigCnf (cnf);
306  return;
307 }
308 
309 void
310 PfFfMacScheduler::DoCschedUeConfigReq (const struct FfMacCschedSapProvider::CschedUeConfigReqParameters& params)
311 {
312  NS_LOG_FUNCTION (this << " RNTI " << params.m_rnti << " txMode " << (uint16_t)params.m_transmissionMode);
313  std::map <uint16_t,uint8_t>::iterator it = m_uesTxMode.find (params.m_rnti);
314  if (it == m_uesTxMode.end ())
315  {
316  m_uesTxMode.insert (std::pair <uint16_t, double> (params.m_rnti, params.m_transmissionMode));
317  // generate HARQ buffers
318  m_dlHarqCurrentProcessId.insert (std::pair <uint16_t,uint8_t > (params.m_rnti, 0));
319  DlHarqProcessesStatus_t dlHarqPrcStatus;
320  dlHarqPrcStatus.resize (8,0);
321  m_dlHarqProcessesStatus.insert (std::pair <uint16_t, DlHarqProcessesStatus_t> (params.m_rnti, dlHarqPrcStatus));
322  DlHarqProcessesTimer_t dlHarqProcessesTimer;
323  dlHarqProcessesTimer.resize (8,0);
324  m_dlHarqProcessesTimer.insert (std::pair <uint16_t, DlHarqProcessesTimer_t> (params.m_rnti, dlHarqProcessesTimer));
325  DlHarqProcessesDciBuffer_t dlHarqdci;
326  dlHarqdci.resize (8);
327  m_dlHarqProcessesDciBuffer.insert (std::pair <uint16_t, DlHarqProcessesDciBuffer_t> (params.m_rnti, dlHarqdci));
328  DlHarqRlcPduListBuffer_t dlHarqRlcPdu;
329  dlHarqRlcPdu.resize (2);
330  dlHarqRlcPdu.at (0).resize (8);
331  dlHarqRlcPdu.at (1).resize (8);
332  m_dlHarqProcessesRlcPduListBuffer.insert (std::pair <uint16_t, DlHarqRlcPduListBuffer_t> (params.m_rnti, dlHarqRlcPdu));
333  m_ulHarqCurrentProcessId.insert (std::pair <uint16_t,uint8_t > (params.m_rnti, 0));
334  UlHarqProcessesStatus_t ulHarqPrcStatus;
335  ulHarqPrcStatus.resize (8,0);
336  m_ulHarqProcessesStatus.insert (std::pair <uint16_t, UlHarqProcessesStatus_t> (params.m_rnti, ulHarqPrcStatus));
337  UlHarqProcessesDciBuffer_t ulHarqdci;
338  ulHarqdci.resize (8);
339  m_ulHarqProcessesDciBuffer.insert (std::pair <uint16_t, UlHarqProcessesDciBuffer_t> (params.m_rnti, ulHarqdci));
340  }
341  else
342  {
343  (*it).second = params.m_transmissionMode;
344  }
345  return;
346 }
347 
348 void
349 PfFfMacScheduler::DoCschedLcConfigReq (const struct FfMacCschedSapProvider::CschedLcConfigReqParameters& params)
350 {
351  NS_LOG_FUNCTION (this << " New LC, rnti: " << params.m_rnti);
352 
353  std::map <uint16_t, pfsFlowPerf_t>::iterator it;
354  for (uint16_t i = 0; i < params.m_logicalChannelConfigList.size (); i++)
355  {
356  it = m_flowStatsDl.find (params.m_rnti);
357 
358  if (it == m_flowStatsDl.end ())
359  {
360  pfsFlowPerf_t flowStatsDl;
361  flowStatsDl.flowStart = Simulator::Now ();
362  flowStatsDl.totalBytesTransmitted = 0;
363  flowStatsDl.lastTtiBytesTrasmitted = 0;
364  flowStatsDl.lastAveragedThroughput = 1;
365  m_flowStatsDl.insert (std::pair<uint16_t, pfsFlowPerf_t> (params.m_rnti, flowStatsDl));
366  pfsFlowPerf_t flowStatsUl;
367  flowStatsUl.flowStart = Simulator::Now ();
368  flowStatsUl.totalBytesTransmitted = 0;
369  flowStatsUl.lastTtiBytesTrasmitted = 0;
370  flowStatsUl.lastAveragedThroughput = 1;
371  m_flowStatsUl.insert (std::pair<uint16_t, pfsFlowPerf_t> (params.m_rnti, flowStatsUl));
372  }
373  else
374  {
375  NS_LOG_ERROR ("RNTI already exists");
376  }
377  }
378 
379  return;
380 }
381 
382 void
383 PfFfMacScheduler::DoCschedLcReleaseReq (const struct FfMacCschedSapProvider::CschedLcReleaseReqParameters& params)
384 {
385  NS_LOG_FUNCTION (this);
386  for (uint16_t i = 0; i < params.m_logicalChannelIdentity.size (); i++)
387  {
388  std::map<LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator it = m_rlcBufferReq.begin ();
389  std::map<LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator temp;
390  while (it!=m_rlcBufferReq.end ())
391  {
392  if (((*it).first.m_rnti == params.m_rnti) && ((*it).first.m_lcId == params.m_logicalChannelIdentity.at (i)))
393  {
394  temp = it;
395  it++;
396  m_rlcBufferReq.erase (temp);
397  }
398  else
399  {
400  it++;
401  }
402  }
403  }
404  return;
405 }
406 
407 void
408 PfFfMacScheduler::DoCschedUeReleaseReq (const struct FfMacCschedSapProvider::CschedUeReleaseReqParameters& params)
409 {
410  NS_LOG_FUNCTION (this);
411 
412  m_uesTxMode.erase (params.m_rnti);
413  m_dlHarqCurrentProcessId.erase (params.m_rnti);
414  m_dlHarqProcessesStatus.erase (params.m_rnti);
415  m_dlHarqProcessesTimer.erase (params.m_rnti);
416  m_dlHarqProcessesDciBuffer.erase (params.m_rnti);
417  m_dlHarqProcessesRlcPduListBuffer.erase (params.m_rnti);
418  m_ulHarqCurrentProcessId.erase (params.m_rnti);
419  m_ulHarqProcessesStatus.erase (params.m_rnti);
420  m_ulHarqProcessesDciBuffer.erase (params.m_rnti);
421  m_flowStatsDl.erase (params.m_rnti);
422  m_flowStatsUl.erase (params.m_rnti);
423  m_ceBsrRxed.erase (params.m_rnti);
424  std::map<LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator it = m_rlcBufferReq.begin ();
425  std::map<LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator temp;
426  while (it!=m_rlcBufferReq.end ())
427  {
428  if ((*it).first.m_rnti == params.m_rnti)
429  {
430  temp = it;
431  it++;
432  m_rlcBufferReq.erase (temp);
433  }
434  else
435  {
436  it++;
437  }
438  }
439  if (m_nextRntiUl == params.m_rnti)
440  {
441  m_nextRntiUl = 0;
442  }
443 
444  return;
445 }
446 
447 
448 void
449 PfFfMacScheduler::DoSchedDlRlcBufferReq (const struct FfMacSchedSapProvider::SchedDlRlcBufferReqParameters& params)
450 {
451  NS_LOG_FUNCTION (this << params.m_rnti << (uint32_t) params.m_logicalChannelIdentity);
452  // API generated by RLC for updating RLC parameters on a LC (tx and retx queues)
453 
454  std::map <LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator it;
455 
456  LteFlowId_t flow (params.m_rnti, params.m_logicalChannelIdentity);
457 
458  it = m_rlcBufferReq.find (flow);
459 
460  if (it == m_rlcBufferReq.end ())
461  {
462  m_rlcBufferReq.insert (std::pair <LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters> (flow, params));
463  }
464  else
465  {
466  (*it).second = params;
467  }
468 
469  return;
470 }
471 
472 void
473 PfFfMacScheduler::DoSchedDlPagingBufferReq (const struct FfMacSchedSapProvider::SchedDlPagingBufferReqParameters& params)
474 {
475  NS_LOG_FUNCTION (this);
476  NS_FATAL_ERROR ("method not implemented");
477  return;
478 }
479 
480 void
481 PfFfMacScheduler::DoSchedDlMacBufferReq (const struct FfMacSchedSapProvider::SchedDlMacBufferReqParameters& params)
482 {
483  NS_LOG_FUNCTION (this);
484  NS_FATAL_ERROR ("method not implemented");
485  return;
486 }
487 
488 int
489 PfFfMacScheduler::GetRbgSize (int dlbandwidth)
490 {
491  for (int i = 0; i < 4; i++)
492  {
493  if (dlbandwidth < PfType0AllocationRbg[i])
494  {
495  return (i + 1);
496  }
497  }
498 
499  return (-1);
500 }
501 
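GetRbgSize walks the PfType0AllocationRbg thresholds defined at the top of the file and returns i+1 for the first threshold the DL bandwidth is below. A worked example for the standard LTE bandwidths (the values follow directly from the array above):

// GetRbgSize (6)   == 1   // 1.4 MHz:   6 < 10
// GetRbgSize (25)  == 2   // 5 MHz:    25 < 26
// GetRbgSize (50)  == 3   // 10 MHz:   50 < 63
// GetRbgSize (100) == 4   // 20 MHz:  100 < 110

Note that the strict '<' comparison maps a bandwidth exactly equal to a threshold (10, 26 or 63 PRBs) to the next larger RBG size; the standard bandwidths (6, 15, 25, 50, 75, 100 PRBs) never fall on these boundaries.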
502 
503 int
504 PfFfMacScheduler::LcActivePerFlow (uint16_t rnti)
505 {
506  std::map <LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator it;
507  int lcActive = 0;
508  for (it = m_rlcBufferReq.begin (); it != m_rlcBufferReq.end (); it++)
509  {
510  if (((*it).first.m_rnti == rnti) && (((*it).second.m_rlcTransmissionQueueSize > 0)
511  || ((*it).second.m_rlcRetransmissionQueueSize > 0)
512  || ((*it).second.m_rlcStatusPduSize > 0) ))
513  {
514  lcActive++;
515  }
516  if ((*it).first.m_rnti > rnti)
517  {
518  break;
519  }
520  }
521  return (lcActive);
522 
523 }
524 
525 
526 uint8_t
527 PfFfMacScheduler::HarqProcessAvailability (uint16_t rnti)
528 {
529  NS_LOG_FUNCTION (this << rnti);
530 
531  std::map <uint16_t, uint8_t>::iterator it = m_dlHarqCurrentProcessId.find (rnti);
532  if (it == m_dlHarqCurrentProcessId.end ())
533  {
534  NS_FATAL_ERROR ("No Process Id found for this RNTI " << rnti);
535  }
536  std::map <uint16_t, DlHarqProcessesStatus_t>::iterator itStat = m_dlHarqProcessesStatus.find (rnti);
537  if (itStat == m_dlHarqProcessesStatus.end ())
538  {
539  NS_FATAL_ERROR ("No Process Id Statusfound for this RNTI " << rnti);
540  }
541  uint8_t i = (*it).second;
542  do
543  {
544  i = (i + 1) % HARQ_PROC_NUM;
545  }
546  while ( ((*itStat).second.at (i) != 0)&&(i != (*it).second));
547  if ((*itStat).second.at (i) == 0)
548  {
549  return (true);
550  }
551  else
552  {
553  return (false); // no free HARQ process available
554  }
555 }
556 
557 
558 
559 uint8_t
560 PfFfMacScheduler::UpdateHarqProcessId (uint16_t rnti)
561 {
562  NS_LOG_FUNCTION (this << rnti);
563 
564  if (m_harqOn == false)
565  {
566  return (0);
567  }
568 
569 
570  std::map <uint16_t, uint8_t>::iterator it = m_dlHarqCurrentProcessId.find (rnti);
571  if (it == m_dlHarqCurrentProcessId.end ())
572  {
573  NS_FATAL_ERROR ("No Process Id found for this RNTI " << rnti);
574  }
575  std::map <uint16_t, DlHarqProcessesStatus_t>::iterator itStat = m_dlHarqProcessesStatus.find (rnti);
576  if (itStat == m_dlHarqProcessesStatus.end ())
577  {
578  NS_FATAL_ERROR ("No Process Id Statusfound for this RNTI " << rnti);
579  }
580  uint8_t i = (*it).second;
581  do
582  {
583  i = (i + 1) % HARQ_PROC_NUM;
584  }
585  while ( ((*itStat).second.at (i) != 0)&&(i != (*it).second));
586  if ((*itStat).second.at (i) == 0)
587  {
588  (*it).second = i;
589  (*itStat).second.at (i) = 1;
590  }
591  else
592  {
593  NS_FATAL_ERROR ("No HARQ process available for RNTI " << rnti << " check before update with HarqProcessAvailability");
594  }
595 
596  return ((*it).second);
597 }
598 
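Both HarqProcessAvailability and UpdateHarqProcessId perform the same circular search over the 8 DL HARQ processes of a UE, starting just after the current process id. A standalone sketch of that search (the helper name and the -1 convention are illustrative, not part of this file):

#include <stdint.h>
#include <vector>

// Probe process ids (current+1) % 8, (current+2) % 8, ... until a free one is found
// or the search wraps around to the starting id.
static int
FindFreeDlHarqProcess (uint8_t currentId, const std::vector<uint8_t>& status)
{
  uint8_t i = currentId;
  do
    {
      i = (i + 1) % 8;                      // HARQ_PROC_NUM
    }
  while ((status.at (i) != 0) && (i != currentId));
  return (status.at (i) == 0) ? i : -1;     // -1: every process is busy
}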
599 
600 void
601 PfFfMacScheduler::RefreshHarqProcesses ()
602 {
603  NS_LOG_FUNCTION (this);
604 
605  std::map <uint16_t, DlHarqProcessesTimer_t>::iterator itTimers;
606  for (itTimers = m_dlHarqProcessesTimer.begin (); itTimers != m_dlHarqProcessesTimer.end (); itTimers ++)
607  {
608  for (uint16_t i = 0; i < HARQ_PROC_NUM; i++)
609  {
610  if ((*itTimers).second.at (i) == HARQ_DL_TIMEOUT)
611  {
612  // reset HARQ process
613 
614  NS_LOG_DEBUG (this << " Reset HARQ proc " << i << " for RNTI " << (*itTimers).first);
615  std::map <uint16_t, DlHarqProcessesStatus_t>::iterator itStat = m_dlHarqProcessesStatus.find ((*itTimers).first);
616  if (itStat == m_dlHarqProcessesStatus.end ())
617  {
618  NS_FATAL_ERROR ("No Process Id Status found for this RNTI " << (*itTimers).first);
619  }
620  (*itStat).second.at (i) = 0;
621  (*itTimers).second.at (i) = 0;
622  }
623  else
624  {
625  (*itTimers).second.at (i)++;
626  }
627  }
628  }
629 
630 }
631 
632 
633 void
634 PfFfMacScheduler::DoSchedDlTriggerReq (const struct FfMacSchedSapProvider::SchedDlTriggerReqParameters& params)
635 {
636  NS_LOG_FUNCTION (this << " Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf));
637  // API generated by RLC for triggering the scheduling of a DL subframe
638 
639 
640  // evaluate the relative channel quality indicator for each UE on each RBG
641  // (since we are using allocation type 0, the smallest unit of allocation is the RBG)
642  // Resource allocation type 0 (see sec 7.1.6.1 of 36.213)
643 
644  RefreshDlCqiMaps ();
645 
646  int rbgSize = GetRbgSize (m_cschedCellConfig.m_dlBandwidth);
647  int rbgNum = m_cschedCellConfig.m_dlBandwidth / rbgSize;
648  std::map <uint16_t, std::vector <uint16_t> > allocationMap; // RBs map per RNTI
649  std::vector <bool> rbgMap; // global RBGs map
650  uint16_t rbgAllocatedNum = 0;
651  std::set <uint16_t> rntiAllocated;
652  rbgMap.resize (m_cschedCellConfig.m_dlBandwidth / rbgSize, false);
653  FfMacSchedSapUser::SchedDlConfigIndParameters ret;
654 
655  // RACH Allocation
656  m_rachAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0);
657  uint16_t rbStart = 0;
658  std::vector <struct RachListElement_s>::iterator itRach;
659  for (itRach = m_rachList.begin (); itRach != m_rachList.end (); itRach++)
660  {
661  NS_ASSERT_MSG (m_amc->GetTbSizeFromMcs (m_ulGrantMcs, m_cschedCellConfig.m_ulBandwidth) > (*itRach).m_estimatedSize, " Default UL Grant MCS does not allow to send RACH messages");
662  BuildRarListElement_s newRar;
663  newRar.m_rnti = (*itRach).m_rnti;
664  // DL-RACH Allocation
665  // Ideal: no need to configure m_dci
666  // UL-RACH Allocation
667  newRar.m_grant.m_rnti = newRar.m_rnti;
668  newRar.m_grant.m_mcs = m_ulGrantMcs;
669  uint16_t rbLen = 1;
670  uint16_t tbSizeBits = 0;
671  // find lowest TB size that fits UL grant estimated size
672  while ((tbSizeBits < (*itRach).m_estimatedSize) && (rbStart + rbLen < m_cschedCellConfig.m_ulBandwidth))
673  {
674  rbLen++;
675  tbSizeBits = m_amc->GetTbSizeFromMcs (m_ulGrantMcs, rbLen);
676  }
677  if (tbSizeBits < (*itRach).m_estimatedSize)
678  {
679  // no more allocation space: finish allocation
680  break;
681  }
682  newRar.m_grant.m_rbStart = rbStart;
683  newRar.m_grant.m_rbLen = rbLen;
684  newRar.m_grant.m_tbSize = tbSizeBits / 8;
685  newRar.m_grant.m_hopping = false;
686  newRar.m_grant.m_tpc = 0;
687  newRar.m_grant.m_cqiRequest = false;
688  newRar.m_grant.m_ulDelay = false;
689  NS_LOG_INFO (this << " UL grant allocated to RNTI " << (*itRach).m_rnti << " rbStart " << rbStart << " rbLen " << rbLen << " MCS " << m_ulGrantMcs << " tbSize " << newRar.m_grant.m_tbSize);
690  for (uint16_t i = rbStart; i < rbStart + rbLen; i++)
691  {
692  m_rachAllocationMap.at (i) = (*itRach).m_rnti;
693  }
694  rbStart = rbStart + rbLen;
695 
696  ret.m_buildRarList.push_back (newRar);
697  }
698  m_rachList.clear ();
699 
700 
701  // Process DL HARQ feedback
702  RefreshHarqProcesses ();
703  // retrieve past HARQ retx buffered
704  if (m_dlInfoListBuffered.size () > 0)
705  {
706  if (params.m_dlInfoList.size () > 0)
707  {
708  NS_LOG_INFO (this << " Received DL-HARQ feedback");
709  m_dlInfoListBuffered.insert (m_dlInfoListBuffered.end (), params.m_dlInfoList.begin (), params.m_dlInfoList.end ());
710  }
711  }
712  else
713  {
714  if (params.m_dlInfoList.size () > 0)
715  {
716  m_dlInfoListBuffered = params.m_dlInfoList;
717  }
718  }
719  if (m_harqOn == false)
720  {
721  // Ignore HARQ feedback
722  m_dlInfoListBuffered.clear ();
723  }
724  std::vector <struct DlInfoListElement_s> dlInfoListUntxed;
725  for (uint16_t i = 0; i < m_dlInfoListBuffered.size (); i++)
726  {
727  std::set <uint16_t>::iterator itRnti = rntiAllocated.find (m_dlInfoListBuffered.at (i).m_rnti);
728  if (itRnti != rntiAllocated.end ())
729  {
730  // RNTI already allocated for retx
731  continue;
732  }
733  uint8_t nLayers = m_dlInfoListBuffered.at (i).m_harqStatus.size ();
734  std::vector <bool> retx;
735  NS_LOG_INFO (this << " Processing DLHARQ feedback");
736  if (nLayers == 1)
737  {
738  retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (0) == DlInfoListElement_s::NACK);
739  retx.push_back (false);
740  }
741  else
742  {
743  retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (0) == DlInfoListElement_s::NACK);
744  retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (1) == DlInfoListElement_s::NACK);
745  }
746  if (retx.at (0) || retx.at (1))
747  {
748  // retrieve HARQ process information
749  uint16_t rnti = m_dlInfoListBuffered.at (i).m_rnti;
750  uint8_t harqId = m_dlInfoListBuffered.at (i).m_harqProcessId;
751  NS_LOG_INFO (this << " HARQ retx RNTI " << rnti << " harqId " << (uint16_t)harqId);
752  std::map <uint16_t, DlHarqProcessesDciBuffer_t>::iterator itHarq = m_dlHarqProcessesDciBuffer.find (rnti);
753  if (itHarq == m_dlHarqProcessesDciBuffer.end ())
754  {
755  NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << rnti);
756  }
757 
758  DlDciListElement_s dci = (*itHarq).second.at (harqId);
759  int rv = 0;
760  if (dci.m_rv.size () == 1)
761  {
762  rv = dci.m_rv.at (0);
763  }
764  else
765  {
766  rv = (dci.m_rv.at (0) > dci.m_rv.at (1) ? dci.m_rv.at (0) : dci.m_rv.at (1));
767  }
768 
769  if (rv == 3)
770  {
771  // maximum number of retx reached -> drop process
772  NS_LOG_INFO ("Maximum number of retransmissions reached -> drop process");
773  std::map <uint16_t, DlHarqProcessesStatus_t>::iterator it = m_dlHarqProcessesStatus.find (rnti);
774  if (it == m_dlHarqProcessesStatus.end ())
775  {
776  NS_LOG_ERROR ("No info find in HARQ buffer for UE (might change eNB) " << m_dlInfoListBuffered.at (i).m_rnti);
777  }
778  (*it).second.at (harqId) = 0;
779  std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find (rnti);
780  if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
781  {
782  NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << m_dlInfoListBuffered.at (i).m_rnti);
783  }
784  for (uint16_t k = 0; k < (*itRlcPdu).second.size (); k++)
785  {
786  (*itRlcPdu).second.at (k).at (harqId).clear ();
787  }
788  continue;
789  }
790  // check the feasibility of retransmitting on the same RBGs
791  // translate the DCI to Spectrum framework
792  std::vector <int> dciRbg;
793  uint32_t mask = 0x1;
794  NS_LOG_INFO ("Original RBGs " << dci.m_rbBitmap << " rnti " << dci.m_rnti);
795  for (int j = 0; j < 32; j++)
796  {
797  if (((dci.m_rbBitmap & mask) >> j) == 1)
798  {
799  dciRbg.push_back (j);
800  NS_LOG_INFO ("\t" << j);
801  }
802  mask = (mask << 1);
803  }
804  bool free = true;
805  for (uint8_t j = 0; j < dciRbg.size (); j++)
806  {
807  if (rbgMap.at (dciRbg.at (j)) == true)
808  {
809  free = false;
810  break;
811  }
812  }
813  if (free)
814  {
815  // use the same RBGs for the retx
816  // reserve RBGs
817  for (uint8_t j = 0; j < dciRbg.size (); j++)
818  {
819  rbgMap.at (dciRbg.at (j)) = true;
820  NS_LOG_INFO ("RBG " << dciRbg.at (j) << " assigned");
821  rbgAllocatedNum++;
822  }
823 
824  NS_LOG_INFO (this << " Send retx in the same RBGs");
825  }
826  else
827  {
828  // find RBGs for sending HARQ retx
829  uint8_t j = 0;
830  uint8_t rbgId = (dciRbg.at (dciRbg.size () - 1) + 1) % rbgNum;
831  uint8_t startRbg = dciRbg.at (dciRbg.size () - 1);
832  std::vector <bool> rbgMapCopy = rbgMap;
833  while ((j < dciRbg.size ())&&(startRbg != rbgId))
834  {
835  if (rbgMapCopy.at (rbgId) == false)
836  {
837  rbgMapCopy.at (rbgId) = true;
838  dciRbg.at (j) = rbgId;
839  j++;
840  }
841  rbgId++;
842  }
843  if (j == dciRbg.size ())
844  {
845  // find new RBGs -> update DCI map
846  uint32_t rbgMask = 0;
847  for (uint16_t k = 0; k < dciRbg.size (); k++)
848  {
849  rbgMask = rbgMask + (0x1 << dciRbg.at (k));
850  rbgAllocatedNum++;
851  }
852  dci.m_rbBitmap = rbgMask;
853  rbgMap = rbgMapCopy;
854  NS_LOG_INFO (this << " Move retx in RBGs " << dciRbg.size ());
855  }
856  else
857  {
858  // HARQ retx cannot be performed on this TTI -> store it
859  dlInfoListUntxed.push_back (params.m_dlInfoList.at (i));
860  NS_LOG_INFO (this << " No resource for this retx -> buffer it");
861  }
862  }
863  // retrieve RLC PDU list for retx TBsize and update DCI
864  BuildDataListElement_s newEl;
865  std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find (rnti);
866  if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
867  {
868  NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << rnti);
869  }
870  for (uint8_t j = 0; j < nLayers; j++)
871  {
872  if (retx.at (j))
873  {
874  if (j >= dci.m_ndi.size ())
875  {
876  // for avoiding errors in MIMO transient phases
877  dci.m_ndi.push_back (0);
878  dci.m_rv.push_back (0);
879  dci.m_mcs.push_back (0);
880  dci.m_tbsSize.push_back (0);
881  NS_LOG_INFO (this << " layer " << (uint16_t)j << " no txed (MIMO transition)");
882  }
883  else
884  {
885  dci.m_ndi.at (j) = 0;
886  dci.m_rv.at (j)++;
887  (*itHarq).second.at (harqId).m_rv.at (j)++;
888  NS_LOG_INFO (this << " layer " << (uint16_t)j << " RV " << (uint16_t)dci.m_rv.at (j));
889  }
890  }
891  else
892  {
893  // empty TB of layer j
894  dci.m_ndi.at (j) = 0;
895  dci.m_rv.at (j) = 0;
896  dci.m_mcs.at (j) = 0;
897  dci.m_tbsSize.at (j) = 0;
898  NS_LOG_INFO (this << " layer " << (uint16_t)j << " no retx");
899  }
900  }
901  for (uint16_t k = 0; k < (*itRlcPdu).second.at (0).at (dci.m_harqProcess).size (); k++)
902  {
903  std::vector <struct RlcPduListElement_s> rlcPduListPerLc;
904  for (uint8_t j = 0; j < nLayers; j++)
905  {
906  if (retx.at (j))
907  {
908  if (j < dci.m_ndi.size ())
909  {
910  rlcPduListPerLc.push_back ((*itRlcPdu).second.at (j).at (dci.m_harqProcess).at (k));
911  }
912  }
913  }
914 
915  if (rlcPduListPerLc.size () > 0)
916  {
917  newEl.m_rlcPduList.push_back (rlcPduListPerLc);
918  }
919  }
920  newEl.m_rnti = rnti;
921  newEl.m_dci = dci;
922  (*itHarq).second.at (harqId).m_rv = dci.m_rv;
923  // refresh timer
924  std::map <uint16_t, DlHarqProcessesTimer_t>::iterator itHarqTimer = m_dlHarqProcessesTimer.find (rnti);
925  if (itHarqTimer== m_dlHarqProcessesTimer.end ())
926  {
927  NS_FATAL_ERROR ("Unable to find HARQ timer for RNTI " << (uint16_t)rnti);
928  }
929  (*itHarqTimer).second.at (harqId) = 0;
930  ret.m_buildDataList.push_back (newEl);
931  rntiAllocated.insert (rnti);
932  }
933  else
934  {
935  // update HARQ process status
936  NS_LOG_INFO (this << " HARQ received ACK for UE " << m_dlInfoListBuffered.at (i).m_rnti);
937  std::map <uint16_t, DlHarqProcessesStatus_t>::iterator it = m_dlHarqProcessesStatus.find (m_dlInfoListBuffered.at (i).m_rnti);
938  if (it == m_dlHarqProcessesStatus.end ())
939  {
940  NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << m_dlInfoListBuffered.at (i).m_rnti);
941  }
942  (*it).second.at (m_dlInfoListBuffered.at (i).m_harqProcessId) = 0;
943  std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find (m_dlInfoListBuffered.at (i).m_rnti);
944  if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
945  {
946  NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << m_dlInfoListBuffered.at (i).m_rnti);
947  }
948  for (uint16_t k = 0; k < (*itRlcPdu).second.size (); k++)
949  {
950  (*itRlcPdu).second.at (k).at (m_dlInfoListBuffered.at (i).m_harqProcessId).clear ();
951  }
952  }
953  }
954  m_dlInfoListBuffered.clear ();
955  m_dlInfoListBuffered = dlInfoListUntxed;
956 
957 
958 
959  for (int i = 0; i < rbgNum; i++)
960  {
961  NS_LOG_INFO (this << " ALLOCATION for RBG " << i << " of " << rbgNum);
962  if (rbgMap.at (i) == false)
963  {
964  std::map <uint16_t, pfsFlowPerf_t>::iterator it;
965  std::map <uint16_t, pfsFlowPerf_t>::iterator itMax = m_flowStatsDl.end ();
966  double rcqiMax = 0.0;
967  for (it = m_flowStatsDl.begin (); it != m_flowStatsDl.end (); it++)
968  {
969  std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).first);
970  if ((itRnti != rntiAllocated.end ())||(!HarqProcessAvailability ((*it).first)))
971  {
972  // UE already allocated for HARQ or without HARQ process available -> drop it
973  if (itRnti != rntiAllocated.end ())
974  {
975  NS_LOG_DEBUG (this << " RNTI discared for HARQ tx" << (uint16_t)(*it).first);
976  }
977  if (!HarqProcessAvailability ((*it).first))
978  {
979  NS_LOG_DEBUG (this << " RNTI discared for HARQ id" << (uint16_t)(*it).first);
980  }
981  continue;
982  }
983  std::map <uint16_t,SbMeasResult_s>::iterator itCqi;
984  itCqi = m_a30CqiRxed.find ((*it).first);
985  std::map <uint16_t,uint8_t>::iterator itTxMode;
986  itTxMode = m_uesTxMode.find ((*it).first);
987  if (itTxMode == m_uesTxMode.end ())
988  {
989  NS_FATAL_ERROR ("No Transmission Mode info on user " << (*it).first);
990  }
991  int nLayer = TransmissionModesLayers::TxMode2LayerNum ((*itTxMode).second);
992  std::vector <uint8_t> sbCqi;
993  if (itCqi == m_a30CqiRxed.end ())
994  {
995  for (uint8_t k = 0; k < nLayer; k++)
996  {
997  sbCqi.push_back (1); // start with lowest value
998  }
999  }
1000  else
1001  {
1002  sbCqi = (*itCqi).second.m_higherLayerSelected.at (i).m_sbCqi;
1003  }
1004  uint8_t cqi1 = sbCqi.at (0);
1005  uint8_t cqi2 = 1;
1006  if (sbCqi.size () > 1)
1007  {
1008  cqi2 = sbCqi.at (1);
1009  }
1010 
1011  if ((cqi1 > 0)||(cqi2 > 0)) // CQI == 0 means "out of range" (see table 7.2.3-1 of 36.213)
1012  {
1013  if (LcActivePerFlow ((*it).first) > 0)
1014  {
1015  // this UE has data to transmit
1016  double achievableRate = 0.0;
1017  uint8_t mcs = 0;
1018  for (uint8_t k = 0; k < nLayer; k++)
1019  {
1020  if (sbCqi.size () > k)
1021  {
1022  mcs = m_amc->GetMcsFromCqi (sbCqi.at (k));
1023  }
1024  else
1025  {
1026  // no info on this subband -> worst MCS
1027  mcs = 0;
1028  }
1029  achievableRate += ((m_amc->GetTbSizeFromMcs (mcs, rbgSize) / 8) / 0.001); // = TB size / TTI
1030  }
1031 
1032  double rcqi = achievableRate / (*it).second.lastAveragedThroughput;
1033  NS_LOG_INFO (this << " RNTI " << (*it).first << " MCS " << (uint32_t)mcs << " achievableRate " << achievableRate << " avgThr " << (*it).second.lastAveragedThroughput << " RCQI " << rcqi);
1034 
1035  if (rcqi > rcqiMax)
1036  {
1037  rcqiMax = rcqi;
1038  itMax = it;
1039  }
1040  }
1041  } // end if cqi
1042  } // end for m_rlcBufferReq
1043 
1044  if (itMax == m_flowStatsDl.end ())
1045  {
1046  // no UE available for this RB
1047  NS_LOG_INFO (this << " any UE found");
1048  }
1049  else
1050  {
1051  rbgMap.at (i) = true;
1052  std::map <uint16_t, std::vector <uint16_t> >::iterator itMap;
1053  itMap = allocationMap.find ((*itMax).first);
1054  if (itMap == allocationMap.end ())
1055  {
1056  // insert new element
1057  std::vector <uint16_t> tempMap;
1058  tempMap.push_back (i);
1059  allocationMap.insert (std::pair <uint16_t, std::vector <uint16_t> > ((*itMax).first, tempMap));
1060  }
1061  else
1062  {
1063  (*itMap).second.push_back (i);
1064  }
1065  NS_LOG_INFO (this << " UE assigned " << (*itMax).first);
1066  }
1067  } // end for RBG free
1068  } // end for RBGs
1069 
1070  // reset TTI stats of users
1071  std::map <uint16_t, pfsFlowPerf_t>::iterator itStats;
1072  for (itStats = m_flowStatsDl.begin (); itStats != m_flowStatsDl.end (); itStats++)
1073  {
1074  (*itStats).second.lastTtiBytesTrasmitted = 0;
1075  }
1076 
1077  // generate the transmission opportunities by grouping the RBGs of the same RNTI and
1078  // creating the corresponding DCIs
1079  std::map <uint16_t, std::vector <uint16_t> >::iterator itMap = allocationMap.begin ();
1080  while (itMap != allocationMap.end ())
1081  {
1082  // create new BuildDataListElement_s for this LC
1083  BuildDataListElement_s newEl;
1084  newEl.m_rnti = (*itMap).first;
1085  // create the DlDciListElement_s
1086  DlDciListElement_s newDci;
1087  newDci.m_rnti = (*itMap).first;
1088  newDci.m_harqProcess = UpdateHarqProcessId ((*itMap).first);
1089 
1090  uint16_t lcActives = LcActivePerFlow ((*itMap).first);
1091  NS_LOG_INFO (this << "Allocate user " << newEl.m_rnti << " rbg " << lcActives);
1092  uint16_t RgbPerRnti = (*itMap).second.size ();
1093  std::map <uint16_t,SbMeasResult_s>::iterator itCqi;
1094  itCqi = m_a30CqiRxed.find ((*itMap).first);
1095  std::map <uint16_t,uint8_t>::iterator itTxMode;
1096  itTxMode = m_uesTxMode.find ((*itMap).first);
1097  if (itTxMode == m_uesTxMode.end ())
1098  {
1099  NS_FATAL_ERROR ("No Transmission Mode info on user " << (*itMap).first);
1100  }
1101  int nLayer = TransmissionModesLayers::TxMode2LayerNum ((*itTxMode).second);
1102  std::vector <uint8_t> worstCqi (2, 15);
1103  if (itCqi != m_a30CqiRxed.end ())
1104  {
1105  for (uint16_t k = 0; k < (*itMap).second.size (); k++)
1106  {
1107  if ((*itCqi).second.m_higherLayerSelected.size () > (*itMap).second.at (k))
1108  {
1109  NS_LOG_INFO (this << " RBG " << (*itMap).second.at (k) << " CQI " << (uint16_t)((*itCqi).second.m_higherLayerSelected.at ((*itMap).second.at (k)).m_sbCqi.at (0)) );
1110  for (uint8_t j = 0; j < nLayer; j++)
1111  {
1112  if ((*itCqi).second.m_higherLayerSelected.at ((*itMap).second.at (k)).m_sbCqi.size () > j)
1113  {
1114  if (((*itCqi).second.m_higherLayerSelected.at ((*itMap).second.at (k)).m_sbCqi.at (j)) < worstCqi.at (j))
1115  {
1116  worstCqi.at (j) = ((*itCqi).second.m_higherLayerSelected.at ((*itMap).second.at (k)).m_sbCqi.at (j));
1117  }
1118  }
1119  else
1120  {
1121  // no CQI for this layer of this subband -> worst one
1122  worstCqi.at (j) = 1;
1123  }
1124  }
1125  }
1126  else
1127  {
1128  for (uint8_t j = 0; j < nLayer; j++)
1129  {
1130  worstCqi.at (j) = 1; // try with lowest MCS in RBG with no info on channel
1131  }
1132  }
1133  }
1134  }
1135  else
1136  {
1137  for (uint8_t j = 0; j < nLayer; j++)
1138  {
1139  worstCqi.at (j) = 1; // try with lowest MCS in RBG with no info on channel
1140  }
1141  }
1142  for (uint8_t j = 0; j < nLayer; j++)
1143  {
1144  NS_LOG_INFO (this << " Layer " << (uint16_t)j << " CQI selected " << (uint16_t)worstCqi.at (j));
1145  }
1146  uint32_t bytesTxed = 0;
1147  for (uint8_t j = 0; j < nLayer; j++)
1148  {
1149  newDci.m_mcs.push_back (m_amc->GetMcsFromCqi (worstCqi.at (j)));
1150  int tbSize = (m_amc->GetTbSizeFromMcs (newDci.m_mcs.at (j), RgbPerRnti * rbgSize) / 8); // (size of TB in bytes according to table 7.1.7.2.1-1 of 36.213)
1151  newDci.m_tbsSize.push_back (tbSize);
1152  NS_LOG_INFO (this << " Layer " << (uint16_t)j << " MCS selected" << m_amc->GetMcsFromCqi (worstCqi.at (j)));
1153  bytesTxed += tbSize;
1154  }
1155 
1156  newDci.m_resAlloc = 0; // only allocation type 0 at this stage
1157  newDci.m_rbBitmap = 0; // TBD (32 bit bitmap see 7.1.6 of 36.213)
1158  uint32_t rbgMask = 0;
1159  for (uint16_t k = 0; k < (*itMap).second.size (); k++)
1160  {
1161  rbgMask = rbgMask + (0x1 << (*itMap).second.at (k));
1162  NS_LOG_INFO (this << " Allocated RBG " << (*itMap).second.at (k));
1163  }
1164  newDci.m_rbBitmap = rbgMask; // (32 bit bitmap see 7.1.6 of 36.213)
1165 
1166  // create the RLC PDUs -> equally divide resources among active LCs
1167  std::map <LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator itBufReq;
1168  for (itBufReq = m_rlcBufferReq.begin (); itBufReq != m_rlcBufferReq.end (); itBufReq++)
1169  {
1170  if (((*itBufReq).first.m_rnti == (*itMap).first)
1171  && (((*itBufReq).second.m_rlcTransmissionQueueSize > 0)
1172  || ((*itBufReq).second.m_rlcRetransmissionQueueSize > 0)
1173  || ((*itBufReq).second.m_rlcStatusPduSize > 0) ))
1174  {
1175  std::vector <struct RlcPduListElement_s> newRlcPduLe;
1176  for (uint8_t j = 0; j < nLayer; j++)
1177  {
1178  RlcPduListElement_s newRlcEl;
1179  newRlcEl.m_logicalChannelIdentity = (*itBufReq).first.m_lcId;
1180  newRlcEl.m_size = newDci.m_tbsSize.at (j) / lcActives;
1181  NS_LOG_INFO (this << " LCID " << (uint32_t) newRlcEl.m_logicalChannelIdentity << " size " << newRlcEl.m_size << " layer " << (uint16_t)j);
1182  newRlcPduLe.push_back (newRlcEl);
1183  UpdateDlRlcBufferInfo (newDci.m_rnti, newRlcEl.m_logicalChannelIdentity, newRlcEl.m_size);
1184  if (m_harqOn == true)
1185  {
1186  // store RLC PDU list for HARQ
1187  std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find ((*itMap).first);
1188  if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
1189  {
1190  NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << (*itMap).first);
1191  }
1192  (*itRlcPdu).second.at (j).at (newDci.m_harqProcess).push_back (newRlcEl);
1193  }
1194  }
1195  newEl.m_rlcPduList.push_back (newRlcPduLe);
1196  }
1197  if ((*itBufReq).first.m_rnti > (*itMap).first)
1198  {
1199  break;
1200  }
1201  }
1202  for (uint8_t j = 0; j < nLayer; j++)
1203  {
1204  newDci.m_ndi.push_back (1);
1205  newDci.m_rv.push_back (0);
1206  }
1207 
1208  newEl.m_dci = newDci;
1209 
1210  if (m_harqOn == true)
1211  {
1212  // store DCI for HARQ
1213  std::map <uint16_t, DlHarqProcessesDciBuffer_t>::iterator itDci = m_dlHarqProcessesDciBuffer.find (newEl.m_rnti);
1214  if (itDci == m_dlHarqProcessesDciBuffer.end ())
1215  {
1216  NS_FATAL_ERROR ("Unable to find RNTI entry in DCI HARQ buffer for RNTI " << newEl.m_rnti);
1217  }
1218  (*itDci).second.at (newDci.m_harqProcess) = newDci;
1219  // refresh timer
1220  std::map <uint16_t, DlHarqProcessesTimer_t>::iterator itHarqTimer = m_dlHarqProcessesTimer.find (newEl.m_rnti);
1221  if (itHarqTimer== m_dlHarqProcessesTimer.end ())
1222  {
1223  NS_FATAL_ERROR ("Unable to find HARQ timer for RNTI " << (uint16_t)newEl.m_rnti);
1224  }
1225  (*itHarqTimer).second.at (newDci.m_harqProcess) = 0;
1226  }
1227 
1228  // ...more parameters -> ignored in this version
1229 
1230  ret.m_buildDataList.push_back (newEl);
1231  // update UE stats
1232  std::map <uint16_t, pfsFlowPerf_t>::iterator it;
1233  it = m_flowStatsDl.find ((*itMap).first);
1234  if (it != m_flowStatsDl.end ())
1235  {
1236  (*it).second.lastTtiBytesTrasmitted = bytesTxed;
1237  NS_LOG_INFO (this << " UE total bytes txed " << (*it).second.lastTtiBytesTrasmitted);
1238 
1239 
1240  }
1241  else
1242  {
1243  NS_FATAL_ERROR (this << " No Stats for this allocated UE");
1244  }
1245 
1246  itMap++;
1247  } // end while allocation
1248  ret.m_nrOfPdcchOfdmSymbols = 1; // TODO: check correct value according to the DCIs txed
1249 
1250 
1251  // update UEs stats
1252  NS_LOG_INFO (this << " Update UEs statistics");
1253  for (itStats = m_flowStatsDl.begin (); itStats != m_flowStatsDl.end (); itStats++)
1254  {
1255  (*itStats).second.totalBytesTransmitted += (*itStats).second.lastTtiBytesTrasmitted;
1256  // update average throughput (see eq. 12.3 of Sec 12.3.1.2 of LTE – The UMTS Long Term Evolution, Ed Wiley)
1257  (*itStats).second.lastAveragedThroughput = ((1.0 - (1.0 / m_timeWindow)) * (*itStats).second.lastAveragedThroughput) + ((1.0 / m_timeWindow) * (double)((*itStats).second.lastTtiBytesTrasmitted / 0.001));
1258  NS_LOG_INFO (this << " UE total bytes " << (*itStats).second.totalBytesTransmitted);
1259  NS_LOG_INFO (this << " UE average throughput " << (*itStats).second.lastAveragedThroughput);
1260  (*itStats).second.lastTtiBytesTrasmitted = 0;
1261  }
1262 
1263  m_schedSapUser->SchedDlConfigInd (ret);
1264 
1265 
1266  return;
1267 }
1268 
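For reference, the proportional-fair choice implemented in the allocation loop above can be stated compactly (this is only a restatement of what the code computes). For every free RBG, the scheduler assigns it to the UE $i$ that maximizes

$$ r_i = \frac{R_i}{\bar{T}_i}, \qquad R_i = \sum_{k=0}^{nLayer-1} \frac{\mathrm{TBS}\big(\mathrm{MCS}(\mathrm{CQI}_{i,k}),\ \mathrm{rbgSize}\big)/8}{0.001}\ \ \text{[bytes/s]}, $$

and at the end of every TTI the average throughput of each UE is updated with the exponential moving average referenced in the comment below (eq. 12.3 of the cited book):

$$ \bar{T}_i \leftarrow \Big(1 - \frac{1}{\tau}\Big)\bar{T}_i + \frac{1}{\tau}\cdot\frac{\mathrm{bytesTxed}_i}{0.001}, \qquad \tau = \texttt{m\_timeWindow}\ (99\ \text{TTIs by default, per the constructor}). $$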
1269 void
1270 PfFfMacScheduler::DoSchedDlRachInfoReq (const struct FfMacSchedSapProvider::SchedDlRachInfoReqParameters& params)
1271 {
1272  NS_LOG_FUNCTION (this);
1273 
1274  m_rachList = params.m_rachList;
1275 
1276  return;
1277 }
1278 
1279 void
1280 PfFfMacScheduler::DoSchedDlCqiInfoReq (const struct FfMacSchedSapProvider::SchedDlCqiInfoReqParameters& params)
1281 {
1282  NS_LOG_FUNCTION (this);
1283 
1284  for (unsigned int i = 0; i < params.m_cqiList.size (); i++)
1285  {
1286  if ( params.m_cqiList.at (i).m_cqiType == CqiListElement_s::P10 )
1287  {
1288  // wideband CQI reporting
1289  std::map <uint16_t,uint8_t>::iterator it;
1290  uint16_t rnti = params.m_cqiList.at (i).m_rnti;
1291  it = m_p10CqiRxed.find (rnti);
1292  if (it == m_p10CqiRxed.end ())
1293  {
1294  // create the new entry
1295  m_p10CqiRxed.insert ( std::pair<uint16_t, uint8_t > (rnti, params.m_cqiList.at (i).m_wbCqi.at (0)) ); // only codeword 0 at this stage (SISO)
1296  // generate the corresponding timer
1297  m_p10CqiTimers.insert ( std::pair<uint16_t, uint32_t > (rnti, m_cqiTimersThreshold));
1298  }
1299  else
1300  {
1301  // update the CQI value and refresh the corresponding timer
1302  (*it).second = params.m_cqiList.at (i).m_wbCqi.at (0);
1303  // update the corresponding timer
1304  std::map <uint16_t,uint32_t>::iterator itTimers;
1305  itTimers = m_p10CqiTimers.find (rnti);
1306  (*itTimers).second = m_cqiTimersThreshold;
1307  }
1308  }
1309  else if ( params.m_cqiList.at (i).m_cqiType == CqiListElement_s::A30 )
1310  {
1311  // higher-layer-configured subband CQI reporting
1312  std::map <uint16_t,SbMeasResult_s>::iterator it;
1313  uint16_t rnti = params.m_cqiList.at (i).m_rnti;
1314  it = m_a30CqiRxed.find (rnti);
1315  if (it == m_a30CqiRxed.end ())
1316  {
1317  // create the new entry
1318  m_a30CqiRxed.insert ( std::pair<uint16_t, SbMeasResult_s > (rnti, params.m_cqiList.at (i).m_sbMeasResult) );
1319  m_a30CqiTimers.insert ( std::pair<uint16_t, uint32_t > (rnti, m_cqiTimersThreshold));
1320  }
1321  else
1322  {
1323  // update the CQI value and refresh the corresponding timer
1324  (*it).second = params.m_cqiList.at (i).m_sbMeasResult;
1325  std::map <uint16_t,uint32_t>::iterator itTimers;
1326  itTimers = m_a30CqiTimers.find (rnti);
1327  (*itTimers).second = m_cqiTimersThreshold;
1328  }
1329  }
1330  else
1331  {
1332  NS_LOG_ERROR (this << " CQI type unknown");
1333  }
1334  }
1335 
1336  return;
1337 }
1338 
1339 
1340 double
1341 PfFfMacScheduler::EstimateUlSinr (uint16_t rnti, uint16_t rb)
1342 {
1343  std::map <uint16_t, std::vector <double> >::iterator itCqi = m_ueCqi.find (rnti);
1344  if (itCqi == m_ueCqi.end ())
1345  {
1346  // no cqi info about this UE
1347  return (NO_SINR);
1348 
1349  }
1350  else
1351  {
1352  // take the average of the available SINR values
1353  double sinrSum = 0;
1354  int sinrNum = 0;
1355  for (uint32_t i = 0; i < m_cschedCellConfig.m_ulBandwidth; i++)
1356  {
1357  double sinr = (*itCqi).second.at (i);
1358  if (sinr != NO_SINR)
1359  {
1360  sinrSum += sinr;
1361  sinrNum++;
1362  }
1363  }
1364  double estimatedSinr = sinrSum / (double)sinrNum;
1365  // store the value
1366  (*itCqi).second.at (rb) = estimatedSinr;
1367  return (estimatedSinr);
1368  }
1369 }
1370 
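Stated as a formula, the estimate stored above is simply the mean of the SINR values (handled as dB elsewhere in this file) already reported for that UE:

$$ \hat{\gamma}_{\mathrm{rnti}}(rb) = \frac{1}{|\mathcal{K}|} \sum_{k \in \mathcal{K}} \gamma_{\mathrm{rnti}}(k), \qquad \mathcal{K} = \{\, k < \mathrm{ulBandwidth} : \gamma_{\mathrm{rnti}}(k) \neq \mathrm{NO\_SINR} \,\}. $$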
1371 void
1372 PfFfMacScheduler::DoSchedUlTriggerReq (const struct FfMacSchedSapProvider::SchedUlTriggerReqParameters& params)
1373 {
1374  NS_LOG_FUNCTION (this << " UL - Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf) << " size " << params.m_ulInfoList.size ());
1375 
1376  RefreshUlCqiMaps ();
1377 
1378  // Generate RBs map
1379  FfMacSchedSapUser::SchedUlConfigIndParameters ret;
1380  std::vector <bool> rbMap;
1381  uint16_t rbAllocatedNum = 0;
1382  std::set <uint16_t> rntiAllocated;
1383  std::vector <uint16_t> rbgAllocationMap;
1384  // update with RACH allocation map
1385  rbgAllocationMap = m_rachAllocationMap;
1386  //rbgAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0);
1387  m_rachAllocationMap.clear ();
1388  m_rachAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0);
1389 
1390  rbMap.resize (m_cschedCellConfig.m_ulBandwidth, false);
1391  // remove RACH allocation
1392  for (uint16_t i = 0; i < m_cschedCellConfig.m_ulBandwidth; i++)
1393  {
1394  if (rbgAllocationMap.at (i) != 0)
1395  {
1396  rbMap.at (i) = true;
1397  NS_LOG_DEBUG (this << " Allocated for RACH " << i);
1398  }
1399  }
1400 
1401 
1402  if (m_harqOn == true)
1403  {
1404  // Process UL HARQ feedback
1405  // update UL HARQ proc id
1406  std::map <uint16_t, uint8_t>::iterator itProcId;
1407  for (itProcId = m_ulHarqCurrentProcessId.begin (); itProcId != m_ulHarqCurrentProcessId.end (); itProcId++)
1408  {
1409  (*itProcId).second = ((*itProcId).second + 1) % HARQ_PROC_NUM;
1410  }
1411 
1412  for (uint16_t i = 0; i < params.m_ulInfoList.size (); i++)
1413  {
1414  if (params.m_ulInfoList.at (i).m_receptionStatus == UlInfoListElement_s::NotOk)
1415  {
1416  // retx of the corresponding block: retrieve the UL-DCI
1417  uint16_t rnti = params.m_ulInfoList.at (i).m_rnti;
1418  itProcId = m_ulHarqCurrentProcessId.find (rnti);
1419  if (itProcId == m_ulHarqCurrentProcessId.end ())
1420  {
1421  NS_LOG_ERROR ("No info find in HARQ buffer for UE (might change eNB) " << rnti);
1422  }
1423  uint8_t harqId = (uint8_t)((*itProcId).second - HARQ_PERIOD) % HARQ_PROC_NUM;
1424  NS_LOG_INFO (this << " UL-HARQ retx RNTI " << rnti << " harqId " << (uint16_t)harqId << " i " << i << " size " << params.m_ulInfoList.size ());
1425  std::map <uint16_t, UlHarqProcessesDciBuffer_t>::iterator itHarq = m_ulHarqProcessesDciBuffer.find (rnti);
1426  if (itHarq == m_ulHarqProcessesDciBuffer.end ())
1427  {
1428  NS_LOG_ERROR ("No info find in HARQ buffer for UE (might change eNB) " << rnti);
1429  continue;
1430  }
1431  UlDciListElement_s dci = (*itHarq).second.at (harqId);
1432  std::map <uint16_t, UlHarqProcessesStatus_t>::iterator itStat = m_ulHarqProcessesStatus.find (rnti);
1433  if (itStat == m_ulHarqProcessesStatus.end ())
1434  {
1435  NS_LOG_ERROR ("No info find in HARQ buffer for UE (might change eNB) " << rnti);
1436  }
1437  if ((*itStat).second.at (harqId) >= 3)
1438  {
1439  NS_LOG_INFO ("Max number of retransmissions reached (UL)-> drop process");
1440  continue;
1441  }
1442  bool free = true;
1443  for (int j = dci.m_rbStart; j < dci.m_rbStart + dci.m_rbLen; j++)
1444  {
1445  if (rbMap.at (j) == true)
1446  {
1447  free = false;
1448  NS_LOG_INFO (this << " BUSY " << j);
1449  }
1450  }
1451  if (free)
1452  {
1453  // retx on the same RBs
1454  for (int j = dci.m_rbStart; j < dci.m_rbStart + dci.m_rbLen; j++)
1455  {
1456  rbMap.at (j) = true;
1457  rbgAllocationMap.at (j) = dci.m_rnti;
1458  NS_LOG_INFO ("\tRB " << j);
1459  rbAllocatedNum++;
1460  }
1461  NS_LOG_INFO (this << " Send retx in the same RBs " << (uint16_t)dci.m_rbStart << " to " << dci.m_rbStart + dci.m_rbLen << " RV " << (*itStat).second.at (harqId) + 1);
1462  }
1463  else
1464  {
1465  NS_LOG_INFO ("Cannot allocate retx due to RACH allocations for UE " << rnti);
1466  continue;
1467  }
1468  dci.m_ndi = 0;
1469  // Update HARQ buffers with new HarqId
1470  (*itStat).second.at ((*itProcId).second) = (*itStat).second.at (harqId) + 1;
1471  (*itStat).second.at (harqId) = 0;
1472  (*itHarq).second.at ((*itProcId).second) = dci;
1473  ret.m_dciList.push_back (dci);
1474  rntiAllocated.insert (dci.m_rnti);
1475  }
1476  else
1477  {
1478  NS_LOG_INFO (this << " HARQ-ACK feedback from RNTI " << params.m_ulInfoList.at (i).m_rnti);
1479  }
1480  }
1481  }
1482 
1483  std::map <uint16_t,uint32_t>::iterator it;
1484  int nflows = 0;
1485 
1486  for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++)
1487  {
1488  std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).first);
1489  // select UEs with queues not empty and not yet allocated for HARQ
1490  if (((*it).second > 0)&&(itRnti == rntiAllocated.end ()))
1491  {
1492  nflows++;
1493  }
1494  }
1495 
1496  if (nflows == 0)
1497  {
1498  if (ret.m_dciList.size () > 0)
1499  {
1500  m_schedSapUser->SchedUlConfigInd (ret);
1501  }
1502 
1503  return; // no flows to be scheduled
1504  }
1505 
1506 
1507  // Divide the remaining resources equally among the active users, starting from the user after the one served at the last scheduling trigger
1508  uint16_t rbPerFlow = (m_cschedCellConfig.m_ulBandwidth) / (nflows + rntiAllocated.size ());
1509  if (rbPerFlow < 3)
1510  {
1511  rbPerFlow = 3; // at least 3 RBs per flow (while resources are available) to ensure a TxOpportunity >= 7 bytes
1512  }
1513  int rbAllocated = 0;
1514 
1515  std::map <uint16_t, pfsFlowPerf_t>::iterator itStats;
1516  if (m_nextRntiUl != 0)
1517  {
1518  for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++)
1519  {
1520  if ((*it).first == m_nextRntiUl)
1521  {
1522  break;
1523  }
1524  }
1525  if (it == m_ceBsrRxed.end ())
1526  {
1527  NS_LOG_ERROR (this << " no user found");
1528  }
1529  }
1530  else
1531  {
1532  it = m_ceBsrRxed.begin ();
1533  m_nextRntiUl = (*it).first;
1534  }
1535  do
1536  {
1537  std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).first);
1538  if ((itRnti != rntiAllocated.end ())||((*it).second == 0))
1539  {
1540  // UE already allocated for UL-HARQ -> skip it
1541  it++;
1542  if (it == m_ceBsrRxed.end ())
1543  {
1544  // restart from the first
1545  it = m_ceBsrRxed.begin ();
1546  }
1547  continue;
1548  }
1549  if (rbAllocated + rbPerFlow - 1 > m_cschedCellConfig.m_ulBandwidth)
1550  {
1551  // limit the last resource assignment to the available physical resources
1552  rbPerFlow = m_cschedCellConfig.m_ulBandwidth - rbAllocated;
1553  // at least 3 RBs per flow to ensure a TxOpportunity >= 7 bytes
1554  if (rbPerFlow < 3)
1555  {
1556  // terminate allocation
1557  rbPerFlow = 0;
1558  }
1559  }
1560 
1561  UlDciListElement_s uldci;
1562  uldci.m_rnti = (*it).first;
1563  uldci.m_rbLen = rbPerFlow;
1564  bool allocated = false;
1565  NS_LOG_INFO (this << " RB Allocated " << rbAllocated << " rbPerFlow " << rbPerFlow);
1566  while ((!allocated)&&((rbAllocated + rbPerFlow - 1) < m_cschedCellConfig.m_ulBandwidth) && (rbPerFlow != 0))
1567  {
1568  // check availability
1569  bool free = true;
1570  for (uint16_t j = rbAllocated; j < rbAllocated + rbPerFlow; j++)
1571  {
1572  if (rbMap.at (j) == true)
1573  {
1574  free = false;
1575  break;
1576  }
1577  }
1578  if (free)
1579  {
1580  uldci.m_rbStart = rbAllocated;
1581 
1582  for (uint16_t j = rbAllocated; j < rbAllocated + rbPerFlow; j++)
1583  {
1584  rbMap.at (j) = true;
1585  // store info on allocation for managing ul-cqi interpretation
1586  rbgAllocationMap.at (j) = (*it).first;
1587  }
1588  rbAllocated += rbPerFlow;
1589  allocated = true;
1590  break;
1591  }
1592  rbAllocated++;
1593  if (rbAllocated + rbPerFlow - 1 > m_cschedCellConfig.m_ulBandwidth)
1594  {
1595  // limit the last resource assignment to the available physical resources
1596  rbPerFlow = m_cschedCellConfig.m_ulBandwidth - rbAllocated;
1597  // at least 3 RBs per flow to ensure a TxOpportunity >= 7 bytes
1598  if (rbPerFlow < 3)
1599  {
1600  // terminate allocation
1601  rbPerFlow = 0;
1602  }
1603  }
1604  }
1605  if (!allocated)
1606  {
1607  // unable to allocate new resource: finish scheduling
1608  m_nextRntiUl = (*it).first;
1609  if (ret.m_dciList.size () > 0)
1610  {
1611  m_schedSapUser->SchedUlConfigInd (ret);
1612  }
1613  m_allocationMaps.insert (std::pair <uint16_t, std::vector <uint16_t> > (params.m_sfnSf, rbgAllocationMap));
1614  return;
1615  }
1616 
1617 
1618 
1619  std::map <uint16_t, std::vector <double> >::iterator itCqi = m_ueCqi.find ((*it).first);
1620  int cqi = 0;
1621  if (itCqi == m_ueCqi.end ())
1622  {
1623  // no cqi info about this UE
1624  uldci.m_mcs = 0; // MCS 0 -> UL-AMC TBD
1625  }
1626  else
1627  {
1628  // take the lowest CQI value (worst RB)
1629  double minSinr = (*itCqi).second.at (uldci.m_rbStart);
1630  if (minSinr == NO_SINR)
1631  {
1632  minSinr = EstimateUlSinr ((*it).first, uldci.m_rbStart);
1633  }
1634  for (uint16_t i = uldci.m_rbStart; i < uldci.m_rbStart + uldci.m_rbLen; i++)
1635  {
1636  double sinr = (*itCqi).second.at (i);
1637  if (sinr == NO_SINR)
1638  {
1639  sinr = EstimateUlSinr ((*it).first, i);
1640  }
1641  if ((*itCqi).second.at (i) < minSinr)
1642  {
1643  minSinr = (*itCqi).second.at (i);
1644  }
1645  }
1646 
1647  // translate SINR -> CQI: crude approximation, same mapping as the DL
1648  double s = log2 ( 1 + (
1649  std::pow (10, minSinr / 10 ) /
1650  ( (-std::log (5.0 * 0.00005 )) / 1.5) ));
1651  cqi = m_amc->GetCqiFromSpectralEfficiency (s);
1652  if (cqi == 0)
1653  {
1654  it++;
1655  if (it == m_ceBsrRxed.end ())
1656  {
1657  // restart from the first
1658  it = m_ceBsrRxed.begin ();
1659  }
1660  continue; // CQI == 0 means "out of range" (see table 7.2.3-1 of 36.213)
1661  }
1662  uldci.m_mcs = m_amc->GetMcsFromCqi (cqi);
1663  }
1664 
1665  uldci.m_tbSize = (m_amc->GetTbSizeFromMcs (uldci.m_mcs, rbPerFlow) / 8);
1666  UpdateUlRlcBufferInfo (uldci.m_rnti, uldci.m_tbSize);
1667  uldci.m_ndi = 1;
1668  uldci.m_cceIndex = 0;
1669  uldci.m_aggrLevel = 1;
1670  uldci.m_ueTxAntennaSelection = 3; // antenna selection OFF
1671  uldci.m_hopping = false;
1672  uldci.m_n2Dmrs = 0;
1673  uldci.m_tpc = 0; // no power control
1674  uldci.m_cqiRequest = false; // only periodic CQI at this stage
1675  uldci.m_ulIndex = 0; // TDD parameter
1676  uldci.m_dai = 1; // TDD parameter
1677  uldci.m_freqHopping = 0;
1678  uldci.m_pdcchPowerOffset = 0; // not used
1679  ret.m_dciList.push_back (uldci);
1680  // store DCI for HARQ_PERIOD
1681  uint8_t harqId = 0;
1682  if (m_harqOn == true)
1683  {
1684  std::map <uint16_t, uint8_t>::iterator itProcId;
1685  itProcId = m_ulHarqCurrentProcessId.find (uldci.m_rnti);
1686  if (itProcId == m_ulHarqCurrentProcessId.end ())
1687  {
1688  NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << uldci.m_rnti);
1689  }
1690  harqId = (*itProcId).second;
1691  std::map <uint16_t, UlHarqProcessesDciBuffer_t>::iterator itDci = m_ulHarqProcessesDciBuffer.find (uldci.m_rnti);
1692  if (itDci == m_ulHarqProcessesDciBuffer.end ())
1693  {
1694  NS_FATAL_ERROR ("Unable to find RNTI entry in UL DCI HARQ buffer for RNTI " << uldci.m_rnti);
1695  }
1696  (*itDci).second.at (harqId) = uldci;
1697  }
1698 
1699  NS_LOG_INFO (this << " UE Allocation RNTI " << (*it).first << " startPRB " << (uint32_t)uldci.m_rbStart << " nPRB " << (uint32_t)uldci.m_rbLen << " CQI " << cqi << " MCS " << (uint32_t)uldci.m_mcs << " TBsize " << uldci.m_tbSize << " RbAlloc " << rbAllocated << " harqId " << (uint16_t)harqId);
1700 
1701  // update TTI UE stats
1702  itStats = m_flowStatsUl.find ((*it).first);
1703  if (itStats != m_flowStatsUl.end ())
1704  {
1705  (*itStats).second.lastTtiBytesTrasmitted = uldci.m_tbSize;
1706  }
1707  else
1708  {
1709  NS_LOG_DEBUG (this << " No Stats for this allocated UE");
1710  }
1711 
1712 
1713  it++;
1714  if (it == m_ceBsrRxed.end ())
1715  {
1716  // restart from the first
1717  it = m_ceBsrRxed.begin ();
1718  }
1719  if ((rbAllocated == m_cschedCellConfig.m_ulBandwidth) || (rbPerFlow == 0))
1720  {
1721  // Stop allocation: no more PRBs
1722  m_nextRntiUl = (*it).first;
1723  break;
1724  }
1725  }
1726  while (((*it).first != m_nextRntiUl) && (rbPerFlow != 0));
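 // The do-while above serves the UEs with a pending BSR in round-robin order starting
 // from m_nextRntiUl, and stops when every UE has been visited once, when all UL PRBs
 // have been assigned, or when rbPerFlow has been forced to 0.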
1727 
1728 
1729  // Update the global per-UE statistics
1731  for (itStats = m_flowStatsUl.begin (); itStats != m_flowStatsUl.end (); itStats++)
1732  {
1733  (*itStats).second.totalBytesTransmitted += (*itStats).second.lastTtiBytesTrasmitted;
1734  // update average throughput (see eq. 12.3 of Sec 12.3.1.2 of LTE – The UMTS Long Term Evolution, Ed Wiley)
1735  (*itStats).second.lastAveragedThroughput = ((1.0 - (1.0 / m_timeWindow)) * (*itStats).second.lastAveragedThroughput) + ((1.0 / m_timeWindow) * (double)((*itStats).second.lastTtiBytesTrasmitted / 0.001));
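 // Exponential moving average over a window of m_timeWindow TTIs:
 //   R_k(t) = (1 - 1/t_c) * R_k(t-1) + (1/t_c) * r_k(t),   t_c = m_timeWindow,
 // where r_k(t) is the instantaneous rate, i.e. the bytes sent in this 1 ms TTI divided by 0.001 s.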
1736  NS_LOG_INFO (this << " UE total bytes " << (*itStats).second.totalBytesTransmitted);
1737  NS_LOG_INFO (this << " UE average throughput " << (*itStats).second.lastAveragedThroughput);
1738  (*itStats).second.lastTtiBytesTrasmitted = 0;
1739  }
1740  m_allocationMaps.insert (std::pair <uint16_t, std::vector <uint16_t> > (params.m_sfnSf, rbgAllocationMap));
1741  m_schedSapUser->SchedUlConfigInd (ret);
1742 
1743  return;
1744 }
1745 
1746 void
1747 PfFfMacScheduler::DoSchedUlNoiseInterferenceReq (const struct FfMacSchedSapProvider::SchedUlNoiseInterferenceReqParameters& params)
1748 {
1749  NS_LOG_FUNCTION (this);
1750  return;
1751 }
1752 
1753 void
1754 PfFfMacScheduler::DoSchedUlSrInfoReq (const struct FfMacSchedSapProvider::SchedUlSrInfoReqParameters& params)
1755 {
1756  NS_LOG_FUNCTION (this);
1757  return;
1758 }
1759 
1760 void
1761 PfFfMacScheduler::DoSchedUlMacCtrlInfoReq (const struct FfMacSchedSapProvider::SchedUlMacCtrlInfoReqParameters& params)
1762 {
1763  NS_LOG_FUNCTION (this);
1764 
1765  std::map <uint16_t,uint32_t>::iterator it;
1766 
1767  for (unsigned int i = 0; i < params.m_macCeList.size (); i++)
1768  {
1769  if ( params.m_macCeList.at (i).m_macCeType == MacCeListElement_s::BSR )
1770  {
1771  // buffer status report
1772  // note that this scheduler does not differentiate the
1773  // allocation according to which LCGs have more/less bytes
1774  // to send.
1775  // Hence the BSRs of the different LCGs are summed up to get
1776  // a total queue size that is used for allocation purposes.
1777 
1778  uint32_t buffer = 0;
1779  for (uint8_t lcg = 0; lcg < 4; ++lcg)
1780  {
1781  uint8_t bsrId = params.m_macCeList.at (i).m_macCeValue.m_bufferStatus.at (lcg);
1782  buffer += BufferSizeLevelBsr::BsrId2BufferSize (bsrId);
1783  }
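 // BsrId2BufferSize maps the BSR index reported by the UE to a buffer size in bytes
 // (buffer size levels of 36.321); the sum over the four LCGs is the total amount of
 // data the UE is asking to transmit.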
1784 
1785  uint16_t rnti = params.m_macCeList.at (i).m_rnti;
1786  NS_LOG_LOGIC (this << " RNTI=" << rnti << " buffer=" << buffer);
1787  it = m_ceBsrRxed.find (rnti);
1788  if (it == m_ceBsrRxed.end ())
1789  {
1790  // create the new entry
1791  m_ceBsrRxed.insert ( std::pair<uint16_t, uint32_t > (rnti, buffer));
1792  }
1793  else
1794  {
1795  // update the buffer size value
1796  (*it).second = buffer;
1797  }
1798  }
1799  }
1800 
1801  return;
1802 }
1803 
1804 void
1805 PfFfMacScheduler::DoSchedUlCqiInfoReq (const struct FfMacSchedSapProvider::SchedUlCqiInfoReqParameters& params)
1806 {
1807  NS_LOG_FUNCTION (this);
1808 // retrieve the allocation for this subframe
1809  switch (m_ulCqiFilter)
1810  {
1811  case FfMacScheduler::SRS_UL_CQI:
1812  {
1813  // filter all the CQIs that are not SRS based
1814  if (params.m_ulCqi.m_type != UlCqi_s::SRS)
1815  {
1816  return;
1817  }
1818  }
1819  break;
1820  case FfMacScheduler::PUSCH_UL_CQI:
1821  {
1822  // filter all the CQIs that are not PUSCH based
1823  if (params.m_ulCqi.m_type != UlCqi_s::PUSCH)
1824  {
1825  return;
1826  }
1827  }
  break;
1828  case FfMacScheduler::ALL_UL_CQI:
1829  break;
1830 
1831  default:
1832  NS_FATAL_ERROR ("Unknown UL CQI type");
1833  }
1834 
1835  switch (params.m_ulCqi.m_type)
1836  {
1837  case UlCqi_s::PUSCH:
1838  {
1839  std::map <uint16_t, std::vector <uint16_t> >::iterator itMap;
1840  std::map <uint16_t, std::vector <double> >::iterator itCqi;
1841  NS_LOG_DEBUG (this << " Collect PUSCH CQIs of Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf));
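 // params.m_sfnSf packs the system frame number in the upper bits and the subframe
 // number in the lowest 4 bits; it is the key under which the RB-to-RNTI allocation
 // map was stored when this subframe was scheduled, and is used to look it up here.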
1842  itMap = m_allocationMaps.find (params.m_sfnSf);
1843  if (itMap == m_allocationMaps.end ())
1844  {
1845  return;
1846  }
1847  for (uint32_t i = 0; i < (*itMap).second.size (); i++)
1848  {
1849  // convert from fixed point notation Sxxxxxxxxxxx.xxx to double
1850  double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (i));
1851  itCqi = m_ueCqi.find ((*itMap).second.at (i));
1852  if (itCqi == m_ueCqi.end ())
1853  {
1854  // create a new entry
1855  std::vector <double> newCqi;
1856  for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
1857  {
1858  if (i == j)
1859  {
1860  newCqi.push_back (sinr);
1861  }
1862  else
1863  {
1864  // initialize with NO_SINR value.
1865  newCqi.push_back (NO_SINR);
1866  }
1867 
1868  }
1869  m_ueCqi.insert (std::pair <uint16_t, std::vector <double> > ((*itMap).second.at (i), newCqi));
1870  // generate the corresponding timer
1871  m_ueCqiTimers.insert (std::pair <uint16_t, uint32_t > ((*itMap).second.at (i), m_cqiTimersThreshold));
1872  }
1873  else
1874  {
1875  // update the value
1876  (*itCqi).second.at (i) = sinr;
1877  NS_LOG_DEBUG (this << " RNTI " << (*itMap).second.at (i) << " RB " << i << " SINR " << sinr);
1878  // update the corresponding timer
1879  std::map <uint16_t, uint32_t>::iterator itTimers;
1880  itTimers = m_ueCqiTimers.find ((*itMap).second.at (i));
1881  (*itTimers).second = m_cqiTimersThreshold;
1882 
1883  }
1884 
1885  }
1886  // remove obsolete info on allocation
1887  m_allocationMaps.erase (itMap);
1888  }
1889  break;
1890  case UlCqi_s::SRS:
1891  {
1892  // get the RNTI from vendor specific parameters
1893  uint16_t rnti = 0;
1894  NS_ASSERT (params.m_vendorSpecificList.size () > 0);
1895  for (uint16_t i = 0; i < params.m_vendorSpecificList.size (); i++)
1896  {
1897  if (params.m_vendorSpecificList.at (i).m_type == SRS_CQI_RNTI_VSP)
1898  {
1899  Ptr<SrsCqiRntiVsp> vsp = DynamicCast<SrsCqiRntiVsp> (params.m_vendorSpecificList.at (i).m_value);
1900  rnti = vsp->GetRnti ();
1901  }
1902  }
1903  std::map <uint16_t, std::vector <double> >::iterator itCqi;
1904  itCqi = m_ueCqi.find (rnti);
1905  if (itCqi == m_ueCqi.end ())
1906  {
1907  // create a new entry
1908  std::vector <double> newCqi;
1909  for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
1910  {
1911  double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
1912  newCqi.push_back (sinr);
1913  NS_LOG_INFO (this << " RNTI " << rnti << " new SRS-CQI for RB " << j << " value " << sinr);
1914 
1915  }
1916  m_ueCqi.insert (std::pair <uint16_t, std::vector <double> > (rnti, newCqi));
1917  // generate the corresponding timer
1918  m_ueCqiTimers.insert (std::pair <uint16_t, uint32_t > (rnti, m_cqiTimersThreshold));
1919  }
1920  else
1921  {
1922  // update the values
1923  for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
1924  {
1925  double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
1926  (*itCqi).second.at (j) = sinr;
1927  NS_LOG_INFO (this << " RNTI " << rnti << " update SRS-CQI for RB " << j << " value " << sinr);
1928  }
1929  // update the corresponding timer
1930  std::map <uint16_t, uint32_t>::iterator itTimers;
1931  itTimers = m_ueCqiTimers.find (rnti);
1932  (*itTimers).second = m_cqiTimersThreshold;
1933 
1934  }
1935 
1936 
1937  }
1938  break;
1939  case UlCqi_s::PUCCH_1:
1940  case UlCqi_s::PUCCH_2:
1941  case UlCqi_s::PRACH:
1942  {
1943  NS_FATAL_ERROR ("PfFfMacScheduler supports only PUSCH and SRS UL-CQIs");
1944  }
1945  break;
1946  default:
1947  NS_FATAL_ERROR ("Unknown type of UL-CQI");
1948  }
1949  return;
1950 }
1951 
1952 void
1953 PfFfMacScheduler::RefreshDlCqiMaps (void)
1954 {
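 // Each received wideband (P10) or subband (A30) DL CQI arms a countdown of
 // m_cqiTimersThreshold TTIs; when the countdown reaches zero the report is
 // considered stale and both the CQI entry and its timer are erased, otherwise
 // the timer is simply decremented.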
1955  // refresh DL CQI P10 Map
1956  std::map <uint16_t,uint32_t>::iterator itP10 = m_p10CqiTimers.begin ();
1957  while (itP10 != m_p10CqiTimers.end ())
1958  {
1959  NS_LOG_INFO (this << " P10-CQI for user " << (*itP10).first << " is " << (uint32_t)(*itP10).second << " thr " << (uint32_t)m_cqiTimersThreshold);
1960  if ((*itP10).second == 0)
1961  {
1962  // delete the corresponding entries
1963  std::map <uint16_t,uint8_t>::iterator itMap = m_p10CqiRxed.find ((*itP10).first);
1964  NS_ASSERT_MSG (itMap != m_p10CqiRxed.end (), " Cannot find CQI report for user " << (*itP10).first);
1965  NS_LOG_INFO (this << " P10-CQI expired for user " << (*itP10).first);
1966  m_p10CqiRxed.erase (itMap);
1967  std::map <uint16_t,uint32_t>::iterator temp = itP10;
1968  itP10++;
1969  m_p10CqiTimers.erase (temp);
1970  }
1971  else
1972  {
1973  (*itP10).second--;
1974  itP10++;
1975  }
1976  }
1977 
1978  // refresh DL CQI A30 Map
1979  std::map <uint16_t,uint32_t>::iterator itA30 = m_a30CqiTimers.begin ();
1980  while (itA30 != m_a30CqiTimers.end ())
1981  {
1982  NS_LOG_INFO (this << " A30-CQI for user " << (*itA30).first << " is " << (uint32_t)(*itA30).second << " thr " << (uint32_t)m_cqiTimersThreshold);
1983  if ((*itA30).second == 0)
1984  {
1985  // delete the corresponding entries
1986  std::map <uint16_t,SbMeasResult_s>::iterator itMap = m_a30CqiRxed.find ((*itA30).first);
1987  NS_ASSERT_MSG (itMap != m_a30CqiRxed.end (), " Cannot find CQI report for user " << (*itA30).first);
1988  NS_LOG_INFO (this << " A30-CQI expired for user " << (*itA30).first);
1989  m_a30CqiRxed.erase (itMap);
1990  std::map <uint16_t,uint32_t>::iterator temp = itA30;
1991  itA30++;
1992  m_a30CqiTimers.erase (temp);
1993  }
1994  else
1995  {
1996  (*itA30).second--;
1997  itA30++;
1998  }
1999  }
2000 
2001  return;
2002 }
2003 
2004 
2005 void
2006 PfFfMacScheduler::RefreshUlCqiMaps (void)
2007 {
2008  // refresh UL CQI Map
2009  std::map <uint16_t,uint32_t>::iterator itUl = m_ueCqiTimers.begin ();
2010  while (itUl != m_ueCqiTimers.end ())
2011  {
2012  NS_LOG_INFO (this << " UL-CQI for user " << (*itUl).first << " is " << (uint32_t)(*itUl).second << " thr " << (uint32_t)m_cqiTimersThreshold);
2013  if ((*itUl).second == 0)
2014  {
2015  // delete the corresponding entries
2016  std::map <uint16_t, std::vector <double> >::iterator itMap = m_ueCqi.find ((*itUl).first);
2017  NS_ASSERT_MSG (itMap != m_ueCqi.end (), " Cannot find CQI report for user " << (*itUl).first);
2018  NS_LOG_INFO (this << " UL-CQI expired for user " << (*itUl).first);
2019  (*itMap).second.clear ();
2020  m_ueCqi.erase (itMap);
2021  std::map <uint16_t,uint32_t>::iterator temp = itUl;
2022  itUl++;
2023  m_ueCqiTimers.erase (temp);
2024  }
2025  else
2026  {
2027  (*itUl).second--;
2028  itUl++;
2029  }
2030  }
2031 
2032  return;
2033 }
2034 
2035 void
2036 PfFfMacScheduler::UpdateDlRlcBufferInfo (uint16_t rnti, uint8_t lcid, uint16_t size)
2037 {
2038  std::map<LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator it;
2039  LteFlowId_t flow (rnti, lcid);
2040  it = m_rlcBufferReq.find (flow);
2041  if (it != m_rlcBufferReq.end ())
2042  {
2043  NS_LOG_INFO (this << " UE " << rnti << " LC " << (uint16_t)lcid << " txqueue " << (*it).second.m_rlcTransmissionQueueSize << " retxqueue " << (*it).second.m_rlcRetransmissionQueueSize << " status " << (*it).second.m_rlcStatusPduSize << " decrease " << size);
2044  // Update queues: RLC tx order Status, ReTx, Tx
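 // The bytes granted in this TTI are credited against the queues in RLC transmission
 // order: first the pending STATUS PDU, then the retransmission queue, and finally the
 // new-transmission queue (net of the ~2-byte minimum RLC header overhead).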
2045  // Update status queue
2046  if (((*it).second.m_rlcStatusPduSize > 0) && (size >= (*it).second.m_rlcStatusPduSize))
2047  {
2048  (*it).second.m_rlcStatusPduSize = 0;
2049  }
2050  else if (((*it).second.m_rlcRetransmissionQueueSize > 0) && (size >= (*it).second.m_rlcRetransmissionQueueSize))
2051  {
2052  (*it).second.m_rlcRetransmissionQueueSize = 0;
2053  }
2054  else if ((*it).second.m_rlcTransmissionQueueSize > 0)
2055  {
2056  // update transmission queue
2057  if ((*it).second.m_rlcTransmissionQueueSize <= size)
2058  {
2059  (*it).second.m_rlcTransmissionQueueSize = 0;
2060  }
2061  else
2062  {
2063  size -= 2; // remove the minimum RLC overhead due to the header
2064  (*it).second.m_rlcTransmissionQueueSize -= size;
2065  }
2066  }
2067  }
2068  else
2069  {
2070  NS_LOG_ERROR (this << " Cannot find DL RLC Buffer Report for UE " << rnti);
2071  }
2072 }
2073 
2074 void
2075 PfFfMacScheduler::UpdateUlRlcBufferInfo (uint16_t rnti, uint16_t size)
2076 {
2077 
2078  size = size - 2; // remove the minimum RLC overhead
2079  std::map <uint16_t,uint32_t>::iterator it = m_ceBsrRxed.find (rnti);
2080  if (it != m_ceBsrRxed.end ())
2081  {
2082  NS_LOG_INFO (this << " UE " << rnti << " size " << size << " BSR " << (*it).second);
2083  if ((*it).second >= size)
2084  {
2085  (*it).second -= size;
2086  }
2087  else
2088  {
2089  (*it).second = 0;
2090  }
2091  }
2092  else
2093  {
2094  NS_LOG_ERROR (this << " Cannot find BSR report info for UE " << rnti);
2095  }
2096 
2097 }
2098 
2099 void
2100 PfFfMacScheduler::TransmissionModeConfigurationUpdate (uint16_t rnti, uint8_t txMode)
2101 {
2102  NS_LOG_FUNCTION (this << " RNTI " << rnti << " txMode " << (uint16_t)txMode);
2103  FfMacCschedSapUser::CschedUeConfigUpdateIndParameters params;
2104  params.m_rnti = rnti;
2105  params.m_transmissionMode = txMode;
2106  m_cschedSapUser->CschedUeConfigUpdateInd (params);
2107 }
2108 
2109 
2110 }