pbs_equiv_classes.py 72 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973
  1. # coding: utf-8
  2. # Copyright (C) 1994-2018 Altair Engineering, Inc.
  3. # For more information, contact Altair at www.altair.com.
  4. #
  5. # This file is part of the PBS Professional ("PBS Pro") software.
  6. #
  7. # Open Source License Information:
  8. #
  9. # PBS Pro is free software. You can redistribute it and/or modify it under the
  10. # terms of the GNU Affero General Public License as published by the Free
  11. # Software Foundation, either version 3 of the License, or (at your option) any
  12. # later version.
  13. #
  14. # PBS Pro is distributed in the hope that it will be useful, but WITHOUT ANY
  15. # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  16. # FOR A PARTICULAR PURPOSE.
  17. # See the GNU Affero General Public License for more details.
  18. #
  19. # You should have received a copy of the GNU Affero General Public License
  20. # along with this program. If not, see <http://www.gnu.org/licenses/>.
  21. #
  22. # Commercial License Information:
  23. #
  24. # For a copy of the commercial license terms and conditions,
  25. # go to: (http://www.pbspro.com/UserArea/agreement.html)
  26. # or contact the Altair Legal Department.
  27. #
  28. # Altair’s dual-license business model allows companies, individuals, and
  29. # organizations to create proprietary derivative works of PBS Pro and
  30. # distribute them - whether embedded or bundled with other software -
  31. # under a commercial license agreement.
  32. #
  33. # Use of Altair’s trademarks, including but not limited to "PBS™",
  34. # "PBS Professional®", and "PBS Pro™" and Altair’s logos is subject to Altair's
  35. # trademark licensing policies.
  36. from tests.functional import *
  37. class TestEquivClass(TestFunctional):
  38. """
  39. Test equivalence class functionality
  40. """
  41. def setUp(self):
  42. TestFunctional.setUp(self)
  43. a = {'resources_available.ncpus': 8}
  44. self.server.create_vnodes('vnode', a, 1, self.mom, usenatvnode=True)
  45. self.scheduler.set_sched_config({'log_filter': 2048})
  46. # capture the start time of the test for log matching
  47. self.t = int(time.time())
  48. def submit_jobs(self, num_jobs=1,
  49. attrs={'Resource_List.select': '1:ncpus=1'},
  50. user=TEST_USER):
  51. """
  52. Submit num_jobs number of jobs with attrs attributes for user.
  53. Return a list of job ids
  54. """
  55. ret_jids = []
  56. for n in range(num_jobs):
  57. J = Job(user, attrs)
  58. jid = self.server.submit(J)
  59. ret_jids += [jid]
  60. return ret_jids
  61. def test_basic(self):
  62. """
  63. Test the basic behavior of job equivalence classes: submit two
  64. different types of jobs and see they are in two different classes
  65. """
  66. self.server.manager(MGR_CMD_SET, SERVER,
  67. {'scheduling': 'False'})
  68. # Eat up all the resources
  69. a = {'Resource_List.select': '1:ncpus=8'}
  70. J = Job(TEST_USER, attrs=a)
  71. self.server.submit(J)
  72. jids1 = self.submit_jobs(3, a)
  73. a = {'Resource_List.select': '1:ncpus=4'}
  74. jids2 = self.submit_jobs(3, a)
  75. self.server.manager(MGR_CMD_SET, SERVER,
  76. {'scheduling': 'True'})
  77. self.scheduler.log_match("Number of job equivalence classes: 2",
  78. starttime=self.t)
  79. def test_select(self):
  80. """
  81. Test to see if jobs with select resources not in the resources line
  82. fall into the same equivalence class
  83. """
  84. self.server.manager(MGR_CMD_CREATE, RSC,
  85. {'type': 'long', 'flag': 'nh'}, id='foo')
  86. # Eat up all the resources
  87. a = {'Resource_List.select': '1:ncpus=8'}
  88. J = Job(TEST_USER, attrs=a)
  89. self.server.submit(J)
  90. a = {'Resource_List.select': '1:ncpus=1:foo=4'}
  91. jids1 = self.submit_jobs(3, a)
  92. a = {'Resource_List.select': '1:ncpus=1:foo=8'}
  93. jids2 = self.submit_jobs(3, a)
  94. self.server.manager(MGR_CMD_SET, SERVER,
  95. {'scheduling': 'True'})
  96. # Two equivalence classes: one for the resource eating job and one
  97. # for the other two jobs. While jobs have different amounts of
  98. # the foo resource, foo is not on the resources line.
  99. self.scheduler.log_match("Number of job equivalence classes: 2",
  100. starttime=self.t)
  101. def test_place(self):
  102. """
  103. Test to see if jobs with different place statements
  104. fall into the different equivalence classes
  105. """
  106. # Eat up all the resources
  107. a = {'Resource_List.select': '1:ncpus=8'}
  108. J = Job(TEST_USER, attrs=a)
  109. self.server.submit(J)
  110. a = {'Resource_List.select': '1:ncpus=1',
  111. 'Resource_List.place': 'free'}
  112. jids1 = self.submit_jobs(3, a)
  113. a = {'Resource_List.select': '1:ncpus=1',
  114. 'Resource_List.place': 'excl'}
  115. jids2 = self.submit_jobs(3, a)
  116. self.server.manager(MGR_CMD_SET, SERVER,
  117. {'scheduling': 'True'})
  118. # Three equivalence classes: one for the resource eating job and
  119. # one for each place statement
  120. self.scheduler.log_match("Number of job equivalence classes: 3",
  121. starttime=self.t)
  122. def test_reslist1(self):
  123. """
  124. Test to see if jobs with resources in Resource_List that are not in
  125. the sched_config resources line fall into the same equivalence class
  126. """
  127. self.server.manager(MGR_CMD_CREATE, RSC, {'type': 'string'},
  128. id='baz')
  129. self.server.manager(MGR_CMD_SET, SERVER,
  130. {'scheduling': 'False'})
  131. # Eat up all the resources
  132. a = {'Resource_List.select': '1:ncpus=8'}
  133. J = Job(TEST_USER, attrs=a)
  134. self.server.submit(J)
  135. a = {'Resource_List.software': 'foo'}
  136. jids1 = self.submit_jobs(3, a)
  137. a = {'Resource_List.software': 'bar'}
  138. jids2 = self.submit_jobs(3, a)
  139. a = {'Resource_List.baz': 'foo'}
  140. jids1 = self.submit_jobs(3, a)
  141. a = {'Resource_List.baz': 'bar'}
  142. jids2 = self.submit_jobs(3, a)
  143. self.server.manager(MGR_CMD_SET, SERVER,
  144. {'scheduling': 'True'})
  145. # Two equivalence classes. One for the resource eating job and
  146. # one for the rest. The rest of the jobs have differing values of
  147. # resources not on the resources line. They fall into one class.
  148. self.scheduler.log_match("Number of job equivalence classes: 2",
  149. starttime=self.t)
    def test_reslist2(self):
        """
        Test to see if jobs with resources in Resource_List that are in the
        sched_config resources line fall into the different equivalence classes
        """
        self.server.manager(MGR_CMD_CREATE, RSC, {'type': 'string'},
                            id='baz')
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'False'})
        # Both resources are placed on the resources line, so their
        # values participate in class formation
        self.scheduler.add_resource('software')
        self.scheduler.add_resource('baz')
        # Eat up all the resources
        a = {'Resource_List.select': '1:ncpus=8'}
        J = Job(TEST_USER, attrs=a)
        self.server.submit(J)
        a = {'Resource_List.software': 'foo'}
        jids1 = self.submit_jobs(3, a)
        a = {'Resource_List.software': 'bar'}
        jids2 = self.submit_jobs(3, a)
        a = {'Resource_List.baz': 'foo'}
        jids3 = self.submit_jobs(3, a)
        a = {'Resource_List.baz': 'bar'}
        jids4 = self.submit_jobs(3, a)
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'True'})
        # Five equivalence classes. One for the resource eating job and
        # one for each value of software and baz.
        self.scheduler.log_match("Number of job equivalence classes: 5",
                                 starttime=self.t)
  179. def test_nolimits(self):
  180. """
  181. Test to see that jobs from different users, groups, and projects
  182. all fall into the same equivalence class when there are no limits
  183. """
  184. self.server.manager(MGR_CMD_SET, SERVER,
  185. {'scheduling': 'False'})
  186. # Eat up all the resources
  187. a = {'Resource_List.select': '1:ncpus=8'}
  188. J = Job(TEST_USER, attrs=a)
  189. self.server.submit(J)
  190. jids1 = self.submit_jobs(3, user=TEST_USER)
  191. jids2 = self.submit_jobs(3, user=TEST_USER2)
  192. b = {'group_list': TSTGRP1, 'Resource_List.select': '1:ncpus=8'}
  193. jids3 = self.submit_jobs(3, a, TEST_USER1)
  194. b = {'group_list': TSTGRP2, 'Resource_List.select': '1:ncpus=8'}
  195. jids4 = self.submit_jobs(3, a, TEST_USER1)
  196. b = {'project': 'p1', 'Resource_List.select': '1:ncpus=8'}
  197. jids5 = self.submit_jobs(3, a)
  198. b = {'project': 'p2', 'Resource_List.select': '1:ncpus=8'}
  199. jids6 = self.submit_jobs(3, a)
  200. self.server.manager(MGR_CMD_SET, SERVER,
  201. {'scheduling': 'True'})
  202. # Two equivalence classes: one for the resource eating job and one
  203. # for the rest. Since there are no limits, user, group, nor project
  204. # are taken into account
  205. self.scheduler.log_match("Number of job equivalence classes: 2",
  206. starttime=self.t)
  207. def test_user(self):
  208. """
  209. Test to see that jobs from different users fall into the same
  210. equivalence class without user limits set
  211. """
  212. self.server.manager(MGR_CMD_SET, SERVER,
  213. {'scheduling': 'False'})
  214. # Eat up all the resources
  215. a = {'Resource_List.select': '1:ncpus=8'}
  216. J = Job(TEST_USER, attrs=a)
  217. self.server.submit(J)
  218. jids1 = self.submit_jobs(3, user=TEST_USER)
  219. jids2 = self.submit_jobs(3, user=TEST_USER2)
  220. self.server.manager(MGR_CMD_SET, SERVER,
  221. {'scheduling': 'True'})
  222. # Two equivalence classes: One for the resource eating job and
  223. # one for the rest. Since there are no limits, both users are
  224. # in one class.
  225. self.scheduler.log_match("Number of job equivalence classes: 2",
  226. starttime=self.t)
  227. def test_user_old(self):
  228. """
  229. Test to see that jobs from different users fall into different
  230. equivalence classes with old style limits set
  231. """
  232. self.server.manager(MGR_CMD_SET, SERVER,
  233. {'scheduling': 'False'})
  234. self.server.manager(MGR_CMD_SET, SERVER,
  235. {'max_user_run': 4})
  236. # Eat up all the resources
  237. a = {'Resource_List.select': '1:ncpus=8'}
  238. J = Job(TEST_USER, attrs=a)
  239. self.server.submit(J)
  240. jids1 = self.submit_jobs(3, user=TEST_USER)
  241. jids2 = self.submit_jobs(3, user=TEST_USER2)
  242. self.server.manager(MGR_CMD_SET, SERVER,
  243. {'scheduling': 'True'})
  244. # Three equivalence classes. One for the resource eating job
  245. # and one for each user.
  246. self.scheduler.log_match("Number of job equivalence classes: 3",
  247. starttime=self.t)
  248. def test_user_server(self):
  249. """
  250. Test to see that jobs from different users fall into different
  251. equivalence classes with server hard limits set
  252. """
  253. self.server.manager(MGR_CMD_SET, SERVER,
  254. {'scheduling': 'False'})
  255. self.server.manager(MGR_CMD_SET, SERVER,
  256. {'max_run': '[u:PBS_GENERIC=4]'})
  257. # Eat up all the resources
  258. a = {'Resource_List.select': '1:ncpus=8'}
  259. J = Job(TEST_USER, attrs=a)
  260. self.server.submit(J)
  261. jids1 = self.submit_jobs(3, user=TEST_USER)
  262. jids2 = self.submit_jobs(3, user=TEST_USER2)
  263. self.server.manager(MGR_CMD_SET, SERVER,
  264. {'scheduling': 'True'})
  265. # Three equivalence classes. One for the resource eating job
  266. # and one for each user.
  267. self.scheduler.log_match("Number of job equivalence classes: 3",
  268. starttime=self.t)
  269. def test_user_server_soft(self):
  270. """
  271. Test to see that jobs from different users fall into different
  272. equivalence classes with server soft limits set
  273. """
  274. self.server.manager(MGR_CMD_SET, SERVER,
  275. {'scheduling': 'False'})
  276. self.server.manager(MGR_CMD_SET, SERVER,
  277. {'max_run_soft': '[u:PBS_GENERIC=4]'})
  278. # Eat up all the resources
  279. a = {'Resource_List.select': '1:ncpus=8'}
  280. J = Job(TEST_USER, attrs=a)
  281. self.server.submit(J)
  282. jids1 = self.submit_jobs(3, user=TEST_USER)
  283. jids2 = self.submit_jobs(3, user=TEST_USER2)
  284. self.server.manager(MGR_CMD_SET, SERVER,
  285. {'scheduling': 'True'})
  286. # Three equivalence classes. One for the resource eating job and
  287. # one for each user.
  288. self.scheduler.log_match("Number of job equivalence classes: 3",
  289. starttime=self.t)
  290. def test_user_queue(self):
  291. """
  292. Test to see that jobs from different users fall into different
  293. equivalence classes with queue limits set
  294. """
  295. self.server.manager(MGR_CMD_SET, SERVER,
  296. {'scheduling': 'False'})
  297. self.server.manager(MGR_CMD_SET, QUEUE,
  298. {'max_run': '[u:PBS_GENERIC=4]'}, id='workq')
  299. # Eat up all the resources
  300. a = {'Resource_List.select': '1:ncpus=8'}
  301. J = Job(TEST_USER, attrs=a)
  302. self.server.submit(J)
  303. jids1 = self.submit_jobs(3, user=TEST_USER)
  304. jids2 = self.submit_jobs(3, user=TEST_USER2)
  305. self.server.manager(MGR_CMD_SET, SERVER,
  306. {'scheduling': 'True'})
  307. # Three equivalence classes. One for the resource eating job and
  308. # one for each user.
  309. self.scheduler.log_match("Number of job equivalence classes: 3",
  310. starttime=self.t)
  311. def test_user_queue_soft(self):
  312. """
  313. Test to see that jobs from different users fall into different
  314. equivalence classes with queue soft limits set
  315. """
  316. self.server.manager(MGR_CMD_SET, SERVER,
  317. {'scheduling': 'False'})
  318. self.server.manager(MGR_CMD_SET, QUEUE,
  319. {'max_run_soft': '[u:PBS_GENERIC=4]'}, id='workq')
  320. # Eat up all the resources
  321. a = {'Resource_List.select': '1:ncpus=8'}
  322. J = Job(TEST_USER, attrs=a)
  323. self.server.submit(J)
  324. jids1 = self.submit_jobs(3, user=TEST_USER)
  325. jids2 = self.submit_jobs(3, user=TEST_USER2)
  326. self.server.manager(MGR_CMD_SET, SERVER,
  327. {'scheduling': 'True'})
  328. # Three equivalence classes. One for the resource eating job and
  329. # one for each user.
  330. self.scheduler.log_match("Number of job equivalence classes: 3",
  331. starttime=self.t)
  332. def test_group(self):
  333. """
  334. Test to see that jobs from different groups fall into the same
  335. equivalence class without group limits set
  336. """
  337. self.server.manager(MGR_CMD_SET, SERVER,
  338. {'scheduling': 'False'})
  339. # Eat up all the resources
  340. a = {'Resource_List.select': '1:ncpus=8'}
  341. J = Job(TEST_USER1, attrs=a)
  342. self.server.submit(J)
  343. a = {'group_list': TSTGRP1}
  344. jids1 = self.submit_jobs(3, a, TEST_USER1)
  345. a = {'group_list': TSTGRP2}
  346. jids2 = self.submit_jobs(3, a, TEST_USER1)
  347. self.server.manager(MGR_CMD_SET, SERVER,
  348. {'scheduling': 'True'})
  349. # Two equivalence classes: One for the resource eating job and
  350. # one for the rest. Since there are no limits, both groups are
  351. # in one class.
  352. self.scheduler.log_match("Number of job equivalence classes: 2",
  353. starttime=self.t)
  354. def test_group_old(self):
  355. """
  356. Test to see that jobs from different groups fall into different
  357. equivalence class old style group limits set
  358. """
  359. self.server.manager(MGR_CMD_SET, SERVER,
  360. {'scheduling': 'False'})
  361. self.server.manager(MGR_CMD_SET, SERVER,
  362. {'max_group_run': 4})
  363. # Eat up all the resources
  364. a = {'Resource_List.select': '1:ncpus=8'}
  365. J = Job(TEST_USER1, attrs=a)
  366. self.server.submit(J)
  367. a = {'group_list': TSTGRP1}
  368. jids1 = self.submit_jobs(3, a, TEST_USER1)
  369. a = {'group_list': TSTGRP2}
  370. jids2 = self.submit_jobs(3, a, TEST_USER1)
  371. self.server.manager(MGR_CMD_SET, SERVER,
  372. {'scheduling': 'True'})
  373. # Three equivalence classes. One for the resource eating job and
  374. # one for each group.
  375. self.scheduler.log_match("Number of job equivalence classes: 3",
  376. starttime=self.t)
  377. def test_group_server(self):
  378. """
  379. Test to see that jobs from different groups fall into different
  380. equivalence class server group limits set
  381. """
  382. self.server.manager(MGR_CMD_SET, SERVER,
  383. {'scheduling': 'False'})
  384. self.server.manager(MGR_CMD_SET, SERVER,
  385. {'max_run': '[g:PBS_GENERIC=4]'})
  386. # Eat up all the resources
  387. a = {'Resource_List.select': '1:ncpus=8'}
  388. J = Job(TEST_USER1, attrs=a)
  389. self.server.submit(J)
  390. a = {'group_list': TSTGRP1}
  391. jids1 = self.submit_jobs(3, a, TEST_USER1)
  392. a = {'group_list': TSTGRP2}
  393. jids2 = self.submit_jobs(3, a, TEST_USER1)
  394. self.server.manager(MGR_CMD_SET, SERVER,
  395. {'scheduling': 'True'})
  396. # Three equivalence classes. One for the resource eating job and
  397. # one for each group.
  398. self.scheduler.log_match("Number of job equivalence classes: 3",
  399. starttime=self.t)
  400. def test_group_server_soft(self):
  401. """
  402. Test to see that jobs from different groups fall into different
  403. equivalence class server soft group limits set
  404. """
  405. self.server.manager(MGR_CMD_SET, SERVER,
  406. {'scheduling': 'False'})
  407. self.server.manager(MGR_CMD_SET, SERVER,
  408. {'max_run_soft': '[g:PBS_GENERIC=4]'})
  409. # Eat up all the resources
  410. a = {'Resource_List.select': '1:ncpus=8'}
  411. J = Job(TEST_USER1, attrs=a)
  412. self.server.submit(J)
  413. a = {'group_list': TSTGRP1}
  414. jids1 = self.submit_jobs(3, a, TEST_USER1)
  415. a = {'group_list': TSTGRP2}
  416. jids2 = self.submit_jobs(3, a, TEST_USER1)
  417. self.server.manager(MGR_CMD_SET, SERVER,
  418. {'scheduling': 'True'})
  419. # Three equivalence classes. One for the resource eating job and
  420. # one for each group.
  421. self.scheduler.log_match("Number of job equivalence classes: 3",
  422. starttime=self.t)
  423. def test_group_queue(self):
  424. """
  425. Test to see that jobs from different groups fall into different
  426. equivalence class queue group limits set
  427. """
  428. self.server.manager(MGR_CMD_SET, SERVER,
  429. {'scheduling': 'False'})
  430. self.server.manager(MGR_CMD_SET, QUEUE,
  431. {'max_run': '[g:PBS_GENERIC=4]'}, id='workq')
  432. # Eat up all the resources
  433. a = {'Resource_List.select': '1:ncpus=8'}
  434. J = Job(TEST_USER1, attrs=a)
  435. self.server.submit(J)
  436. a = {'group_list': TSTGRP1}
  437. jids1 = self.submit_jobs(3, a, TEST_USER1)
  438. a = {'group_list': TSTGRP2}
  439. jids2 = self.submit_jobs(3, a, TEST_USER1)
  440. self.server.manager(MGR_CMD_SET, SERVER,
  441. {'scheduling': 'True'})
  442. self.server.manager(MGR_CMD_SET, SERVER,
  443. {'scheduling': 'True'})
  444. # Three equivalence classes. One for the resource eating job and
  445. # one for each group.
  446. self.scheduler.log_match("Number of job equivalence classes: 3",
  447. starttime=self.t)
  448. def test_group_queue_soft(self):
  449. """
  450. Test to see that jobs from different groups fall into different
  451. equivalence class queue group soft limits set
  452. """
  453. self.server.manager(MGR_CMD_SET, SERVER,
  454. {'scheduling': 'False'})
  455. self.server.manager(MGR_CMD_SET, QUEUE,
  456. {'max_run_soft': '[g:PBS_GENERIC=4]'}, id='workq')
  457. # Eat up all the resources
  458. a = {'Resource_List.select': '1:ncpus=8'}
  459. J = Job(TEST_USER1, attrs=a)
  460. self.server.submit(J)
  461. a = {'group_list': TSTGRP1}
  462. jids1 = self.submit_jobs(3, a, TEST_USER1)
  463. a = {'group_list': TSTGRP2}
  464. jids2 = self.submit_jobs(3, a, TEST_USER1)
  465. self.server.manager(MGR_CMD_SET, SERVER,
  466. {'scheduling': 'True'})
  467. self.server.manager(MGR_CMD_SET, SERVER,
  468. {'scheduling': 'True'})
  469. # Three equivalence classes. One for the resource eating job and
  470. # one for each group.
  471. self.scheduler.log_match("Number of job equivalence classes: 3",
  472. starttime=self.t)
  473. def test_proj(self):
  474. """
  475. Test to see that jobs from different projects fall into the same
  476. equivalence class without project limits set
  477. """
  478. self.server.manager(MGR_CMD_SET, SERVER,
  479. {'scheduling': 'False'})
  480. # Eat up all the resources
  481. a = {'Resource_List.select': '1:ncpus=8'}
  482. J = Job(TEST_USER1, attrs=a)
  483. self.server.submit(J)
  484. a = {'project': 'p1'}
  485. jids1 = self.submit_jobs(3, a)
  486. a = {'project': 'p2'}
  487. jids2 = self.submit_jobs(3, a)
  488. self.server.manager(MGR_CMD_SET, SERVER,
  489. {'scheduling': 'True'})
  490. # Two equivalence classes: One for the resource eating job and
  491. # one for the rest. Since there are no limits, both projects are
  492. # in one class.
  493. self.scheduler.log_match("Number of job equivalence classes: 2",
  494. starttime=self.t)
  495. def test_proj_server(self):
  496. """
  497. Test to see that jobs from different projects fall into different
  498. equivalence classes with server project limits set
  499. """
  500. self.server.manager(MGR_CMD_SET, SERVER,
  501. {'scheduling': 'False'})
  502. self.server.manager(MGR_CMD_SET, SERVER,
  503. {'max_run': '[p:PBS_GENERIC=4]'})
  504. # Eat up all the resources
  505. a = {'Resource_List.select': '1:ncpus=8'}
  506. J = Job(TEST_USER1, attrs=a)
  507. self.server.submit(J)
  508. a = {'project': 'p1'}
  509. jids1 = self.submit_jobs(3, a)
  510. a = {'project': 'p2'}
  511. jids2 = self.submit_jobs(3, a)
  512. self.server.manager(MGR_CMD_SET, SERVER,
  513. {'scheduling': 'True'})
  514. # Three equivalence classes. One for the resource eating job and
  515. # one for each project.
  516. self.scheduler.log_match("Number of job equivalence classes: 3",
  517. starttime=self.t)
  518. def test_proj_server_soft(self):
  519. """
  520. Test to see that jobs from different projects fall into different
  521. equivalence class server project soft limits set
  522. """
  523. self.server.manager(MGR_CMD_SET, SERVER,
  524. {'scheduling': 'False'})
  525. self.server.manager(MGR_CMD_SET, SERVER,
  526. {'max_run_soft': '[p:PBS_GENERIC=4]'})
  527. # Eat up all the resources
  528. a = {'Resource_List.select': '1:ncpus=8'}
  529. J = Job(TEST_USER1, attrs=a)
  530. self.server.submit(J)
  531. a = {'project': 'p1'}
  532. jids1 = self.submit_jobs(3, a)
  533. a = {'project': 'p2'}
  534. jids2 = self.submit_jobs(3, a)
  535. self.server.manager(MGR_CMD_SET, SERVER,
  536. {'scheduling': 'True'})
  537. # Three equivalence classes. One for the resource eating job and
  538. # one for each project.
  539. self.scheduler.log_match("Number of job equivalence classes: 3",
  540. starttime=self.t)
    def test_proj_queue(self):
        """
        Test to see that jobs from different projects fall into different
        equivalence classes with queue project limits set
        """
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'False'})
        self.server.manager(MGR_CMD_SET, QUEUE,
                            {'max_run': '[p:PBS_GENERIC=4]'}, id='workq')
        # Eat up all the resources
        a = {'Resource_List.select': '1:ncpus=8'}
        J = Job(TEST_USER1, attrs=a)
        self.server.submit(J)
        a = {'project': 'p1'}
        jids1 = self.submit_jobs(3, a)
        a = {'project': 'p2'}
        jids2 = self.submit_jobs(3, a)
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'True'})
        # Three equivalence classes. One for the resource eating job and
        # one for each project.
        self.scheduler.log_match("Number of job equivalence classes: 3",
                                 starttime=self.t)
    def test_proj_queue_soft(self):
        """
        Test to see that jobs from different projects fall into different
        equivalence classes with queue project soft limits set
        """
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'False'})
        self.server.manager(MGR_CMD_SET, QUEUE,
                            {'max_run_soft': '[p:PBS_GENERIC=4]'}, id='workq')
        # Eat up all the resources
        a = {'Resource_List.select': '1:ncpus=8'}
        J = Job(TEST_USER1, attrs=a)
        self.server.submit(J)
        a = {'project': 'p1'}
        jids1 = self.submit_jobs(3, a)
        a = {'project': 'p2'}
        jids2 = self.submit_jobs(3, a)
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'True'})
        # Three equivalence classes. One for the resource eating job and
        # one for each project.
        self.scheduler.log_match("Number of job equivalence classes: 3",
                                 starttime=self.t)
  587. def test_queue(self):
  588. """
  589. Test to see that jobs from different generic queues fall into
  590. the same equivalence class
  591. """
  592. self.server.manager(MGR_CMD_CREATE, QUEUE,
  593. {'queue_type': 'e', 'started': 'True',
  594. 'enabled': 'True'}, id='workq2')
  595. self.server.manager(MGR_CMD_SET, QUEUE,
  596. {'Priority': 120}, id='workq')
  597. self.server.manager(MGR_CMD_SET, SERVER,
  598. {'scheduling': 'False'})
  599. # Eat up all the resources
  600. a = {'Resource_List.select': '1:ncpus=8'}
  601. J = Job(TEST_USER1, attrs=a)
  602. self.server.submit(J)
  603. a = {'queue': 'workq'}
  604. jids1 = self.submit_jobs(3, a)
  605. a = {'queue': 'workq2'}
  606. jids2 = self.submit_jobs(3, a)
  607. self.server.manager(MGR_CMD_SET, SERVER,
  608. {'scheduling': 'True'})
  609. # Two equivalence classes. One for the resource eating job and
  610. # one for the rest. There is nothing to differentiate the queues
  611. # so all jobs are in one class.
  612. self.scheduler.log_match("Number of job equivalence classes: 2",
  613. starttime=self.t)
    def test_queue_limits(self):
        """
        Test to see if jobs in a queue with limits use their queue as part
        of what defines their equivalence class.
        """
        # Two ordinary queues (workq, workq2) plus two queues carrying
        # run limits: a hard limit on limits1 and a soft limit on limits2.
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True'}, id='workq2')
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True'}, id='limits1')
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True'}, id='limits2')
        self.server.manager(MGR_CMD_SET, QUEUE,
                            {'Priority': 120}, id='workq')
        self.server.manager(MGR_CMD_SET, QUEUE,
                            {'max_run': '[o:PBS_ALL=20]'}, id='limits1')
        self.server.manager(MGR_CMD_SET, QUEUE,
                            {'max_run_soft': '[o:PBS_ALL=20]'}, id='limits2')
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'False'})
        # Eat up all the resources
        a = {'Resource_List.select': '1:ncpus=8'}
        J = Job(TEST_USER1, attrs=a)
        self.server.submit(J)
        a = {'queue': 'workq'}
        jids1 = self.submit_jobs(3, a)
        a = {'queue': 'workq2'}
        jids2 = self.submit_jobs(3, a)
        a = {'queue': 'limits1'}
        jids3 = self.submit_jobs(3, a)
        a = {'queue': 'limits2'}
        jids4 = self.submit_jobs(3, a)
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'True'})
        # 4 equivalence classes. One for the resource eating job and
        # One for the queues without limits and one
        # each for the two queues with limits.
        self.scheduler.log_match("Number of job equivalence classes: 4",
                                 starttime=self.t)
    def test_queue_nodes(self):
        """
        Test to see if jobs that are submitted into a queue with nodes
        associated with it fall into their own equivalence class
        """
        a = {'resources_available.ncpus': 8}
        self.server.create_vnodes('vnode', a, 2, self.mom, usenatvnode=True)
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True', 'Priority': 100}, id='workq2')
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True'}, id='nodes_queue')
        # Bind vnode[0] to nodes_queue; this association is what should
        # split its jobs into a separate class.
        self.server.manager(MGR_CMD_SET, NODE,
                            {'queue': 'nodes_queue'}, id='vnode[0]')
        self.server.manager(MGR_CMD_SET, QUEUE,
                            {'Priority': 120}, id='workq')
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'False'})
        # Eat up all the resources on the normal node
        a = {'Resource_List.select': '1:ncpus=8', 'queue': 'workq'}
        J = Job(TEST_USER1, attrs=a)
        self.server.submit(J)
        # Eat up all the resources on node associated to nodes_queue
        a = {'Resource_List.select': '1:ncpus=4', 'queue': 'nodes_queue'}
        J = Job(TEST_USER1, attrs=a)
        self.server.submit(J)
        a = {'Resource_List.select': '1:ncpus=4', 'queue': 'workq'}
        jids1 = self.submit_jobs(3, a)
        a = {'Resource_List.select': '1:ncpus=4', 'queue': 'workq2'}
        jids2 = self.submit_jobs(3, a)
        a = {'Resource_List.select': '1:ncpus=4', 'queue': 'nodes_queue'}
        jids3 = self.submit_jobs(3, a)
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'True'})
        # Three equivalence classes. One for the resource eating job and
        # one class for the queue with nodes associated with it.
        # One class for normal queues.
        self.scheduler.log_match("Number of job equivalence classes: 3",
                                 starttime=self.t)
    def test_prime_queue(self):
        """
        Test to see if a job in a primetime queue has its queue be part of
        what defines its equivalence class. Also see that jobs in anytime
        queues do not use queue as part of what determines their class
        """
        # Force primetime for every day so "p_" queues are always prime
        self.scheduler.holidays_set_day("weekday", prime="all",
                                        nonprime="none")
        self.scheduler.holidays_set_day("saturday", prime="all",
                                        nonprime="none")
        self.scheduler.holidays_set_day("sunday", prime="all",
                                        nonprime="none")
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True', 'Priority': 100},
                            id='anytime1')
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True'}, id='anytime2')
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True', 'Priority': 100},
                            id='p_queue1')
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True'}, id='p_queue2')
        self.server.manager(MGR_CMD_SET, QUEUE,
                            {'Priority': 120}, id='workq')
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'False'})
        # Eat up all the resources
        a = {'Resource_List.select': '1:ncpus=8', 'queue': 'workq'}
        J = Job(TEST_USER1, attrs=a)
        self.server.submit(J)
        a = {'Resource_List.select': '1:ncpus=4', 'queue': 'anytime1'}
        jids1 = self.submit_jobs(3, a)
        a = {'Resource_List.select': '1:ncpus=4', 'queue': 'anytime2'}
        jids2 = self.submit_jobs(3, a)
        a = {'Resource_List.select': '1:ncpus=4', 'queue': 'p_queue1'}
        jids3 = self.submit_jobs(3, a)
        a = {'Resource_List.select': '1:ncpus=4', 'queue': 'p_queue2'}
        jids4 = self.submit_jobs(3, a)
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'True'})
        # Four equivalence classes. One for the resource eating job and
        # one for the normal queues and one for each prime time queue
        self.scheduler.log_match("Number of job equivalence classes: 4",
                                 starttime=self.t)
    def test_non_prime_queue(self):
        """
        Test to see if a job in a non-primetime queue has its queue be part of
        what defines its equivalence class. Also see that jobs in anytime
        queues do not use queue as part of what determines their class
        """
        # Force non-primetime for every day so "np_" queues are always
        # in effect
        self.scheduler.holidays_set_day("weekday", prime="none",
                                        nonprime="all")
        self.scheduler.holidays_set_day("saturday", prime="none",
                                        nonprime="all")
        self.scheduler.holidays_set_day("sunday", prime="none",
                                        nonprime="all")
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True', 'Priority': 100},
                            id='anytime1')
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True'}, id='anytime2')
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True', 'Priority': 100},
                            id='np_queue1')
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True'}, id='np_queue2')
        self.server.manager(MGR_CMD_SET, QUEUE,
                            {'Priority': 120}, id='workq')
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'False'})
        # Eat up all the resources
        a = {'Resource_List.select': '1:ncpus=8', 'queue': 'workq'}
        J = Job(TEST_USER1, attrs=a)
        self.server.submit(J)
        a = {'Resource_List.select': '1:ncpus=4', 'queue': 'anytime1'}
        jids1 = self.submit_jobs(3, a)
        a = {'Resource_List.select': '1:ncpus=4', 'queue': 'anytime2'}
        jids2 = self.submit_jobs(3, a)
        a = {'Resource_List.select': '1:ncpus=4', 'queue': 'np_queue1'}
        jids3 = self.submit_jobs(3, a)
        a = {'Resource_List.select': '1:ncpus=4', 'queue': 'np_queue2'}
        jids4 = self.submit_jobs(3, a)
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'True'})
        # Four equivalence classes. One for the resource eating job and
        # one for the normal queues and one for each non-prime time queue
        self.scheduler.log_match("Number of job equivalence classes: 4",
                                 starttime=self.t)
    def test_ded_time_queue(self):
        """
        Test to see if a job in a dedicated time queue has its queue be part
        of what defines its equivalence class. Also see that jobs in anytime
        queues do not use queue as part of what determines their class
        """
        # Force dedicated time: a window starting just before now
        now = time.time()
        self.scheduler.add_dedicated_time(start=now - 5, end=now + 3600)
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True', 'Priority': 100},
                            id='ded_queue1')
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True', 'Priority': 100},
                            id='ded_queue2')
        self.server.manager(MGR_CMD_SET, QUEUE,
                            {'Priority': 120}, id='workq')
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'False'})
        # Eat up all the resources
        a = {'Resource_List.select': '1:ncpus=8', 'queue': 'workq'}
        J = Job(TEST_USER1, attrs=a)
        self.server.submit(J)
        a = {'Resource_List.select': '1:ncpus=4',
             'Resource_List.walltime': 600, 'queue': 'ded_queue1'}
        jids1 = self.submit_jobs(3, a)
        a = {'Resource_List.select': '1:ncpus=4',
             'Resource_List.walltime': 600, 'queue': 'ded_queue2'}
        jids2 = self.submit_jobs(3, a)
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'True'})
        # Three equivalence classes: One for the resource eating job and
        # one for each dedicated time queue job
        self.scheduler.log_match("Number of job equivalence classes: 3",
                                 starttime=self.t)
  830. def test_job_array(self):
  831. """
  832. Test that various job types will fall into single equivalence
  833. class with same type of request.
  834. """
  835. # Eat up all the resources
  836. a = {'Resource_List.select': '1:ncpus=8', 'queue': 'workq'}
  837. J = Job(TEST_USER1, attrs=a)
  838. self.server.submit(J)
  839. # Submit a job array
  840. j = Job(TEST_USER)
  841. j.set_attributes(
  842. {ATTR_J: '1-3:1',
  843. 'Resource_List.select': '1:ncpus=8',
  844. 'queue': 'workq'})
  845. jid = self.server.submit(j)
  846. self.server.manager(MGR_CMD_SET, SERVER,
  847. {'scheduling': 'True'})
  848. # One equivalence class
  849. self.scheduler.log_match("Number of job equivalence classes: 1",
  850. starttime=self.t)
    def test_reservation(self):
        """
        Test that similar jobs inside reservations falls under same
        equivalence class.
        """
        # Submit a reservation starting shortly and wait for confirmation
        a = {'Resource_List.select': '1:ncpus=3',
             'reserve_start': int(time.time()) + 10,
             'reserve_end': int(time.time()) + 300, }
        r = Reservation(TEST_USER, a)
        rid = self.server.submit(r)
        a = {'reserve_state': (MATCH_RE, "RESV_CONFIRMED|2")}
        self.server.expect(RESV, a, id=rid)
        # The reservation's queue name is the leading part of its id
        rname = rid.split('.')
        # Submit jobs inside reservation
        a = {ATTR_queue: rname[0], 'Resource_List.select': '1:ncpus=1'}
        jids1 = self.submit_jobs(3, a)
        # Submit jobs outside of reservations
        jids2 = self.submit_jobs(3)
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'True'})
        # Two equivalence classes: one for jobs inside reservations
        # and one for regular jobs
        self.scheduler.log_match("Number of job equivalence classes: 2",
                                 starttime=self.t)
  876. def test_time_limit(self):
  877. """
  878. Test that various time limits will have their own
  879. equivalence classes
  880. """
  881. # Submit a reservation
  882. a = {'Resource_List.select': '1:ncpus=8',
  883. 'reserve_start': time.time() + 30,
  884. 'reserve_end': time.time() + 300, }
  885. r = Reservation(TEST_USER, a)
  886. rid = self.server.submit(r)
  887. a = {'reserve_state': (MATCH_RE, "RESV_CONFIRMED|2")}
  888. self.server.expect(RESV, a, id=rid)
  889. rname = rid.split('.')
  890. # Submit jobs with cput limit inside reservation
  891. a = {'Resource_List.cput': '20', ATTR_queue: rname[0]}
  892. jid1 = self.submit_jobs(2, a)
  893. # Submit jobs with min and max walltime inside reservation
  894. a = {'Resource_List.min_walltime': '20',
  895. 'Resource_List.max_walltime': '200',
  896. ATTR_queue: rname[0]}
  897. jid2 = self.submit_jobs(2, a)
  898. # Submit jobs with regular walltime inside reservation
  899. a = {'Resource_List.walltime': '20', ATTR_queue: rname[0]}
  900. jid3 = self.submit_jobs(2, a)
  901. self.server.manager(MGR_CMD_SET, SERVER,
  902. {'scheduling': 'True'})
  903. # Three equivalence classes: one for each job set
  904. self.scheduler.log_match("Number of job equivalence classes: 3",
  905. starttime=self.t)
    def test_fairshare(self):
        """
        Test that the scheduler does not split jobs into per-user
        equivalence classes when fairshare is enabled.
        """
        a = {'fair_share': 'true ALL',
             'fairshare_usage_res': 'ncpus*walltime',
             'unknown_shares': 10}
        self.scheduler.set_sched_config(a)
        # Submit jobs as different user
        jid1 = self.submit_jobs(8, user=TEST_USER1)
        jid2 = self.submit_jobs(8, user=TEST_USER2)
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'True'})
        # One equivalence class
        self.scheduler.log_match("Number of job equivalence classes: 1",
                                 starttime=self.t)
        # Wait sometime for jobs to accumulate walltime
        time.sleep(20)
        # Submit another job
        self.t = int(time.time())
        jid3 = self.submit_jobs(1, user=TEST_USER3)
        # Look at the job equivalence classes again; still one even with
        # accumulated fairshare usage in play
        self.scheduler.log_match("Number of job equivalence classes: 1",
                                 starttime=self.t)
    def test_server_hook(self):
        """
        Test that job equivalence classes are updated
        when job attributes get updated by hooks
        """
        # Define a queuejob hook: every queued job gets walltime=200
        hook1 = """
import pbs
e = pbs.event()
e.job.Resource_List["walltime"] = 200
"""
        # Define a runjob hook: jobs get cput=40 when run
        hook2 = """
import pbs
e = pbs.event()
e.job.Resource_List["cput"] = 40
"""
        # Define a modifyjob hook: altered jobs get cput=20
        hook3 = """
import pbs
e = pbs.event()
e.job.Resource_List["cput"] = 20
"""
        # Create a queuejob hook
        a = {'event': 'queuejob', 'enabled': 'True'}
        self.server.create_import_hook("t_q", a, hook1)
        # Create a runjob hook
        a = {'event': 'runjob', 'enabled': 'True'}
        self.server.create_import_hook("t_r", a, hook2)
        # Create a modifyjob hook
        a = {'event': 'modifyjob', 'enabled': 'True'}
        self.server.create_import_hook("t_m", a, hook3)
        # Turn scheduling off
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'False'})
        # Submit jobs as different users
        a = {'Resource_List.ncpus': 2}
        jid1 = self.submit_jobs(4, a, user=TEST_USER1)
        jid2 = self.submit_jobs(4, a, user=TEST_USER2)
        jid3 = self.submit_jobs(4, a, user=TEST_USER3)
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'True'})
        # One equivalence class
        self.scheduler.log_match("Number of job equivalence classes: 1",
                                 starttime=self.t)
        # Alter a queued job, triggering the modifyjob hook
        self.t = int(time.time())
        self.server.alterjob(jid3[2], {ATTR_N: "test"})
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'True'})
        # Three equivalence classes: one is for queued jobs that
        # do not have cput set. 2 for the different cputime value
        # set by runjob and modifyjob hook
        self.scheduler.log_match("Number of job equivalence classes: 3",
                                 starttime=self.t)
    def test_mom_hook(self):
        """
        Test for job equivalence classes with mom hooks.
        """
        # Create a host-level string_array resource
        attrib = {}
        attrib['type'] = "string_array"
        attrib['flag'] = 'h'
        self.server.manager(MGR_CMD_CREATE, RSC, attrib, id='foo_str')
        # Create vnodes
        a = {'resources_available.ncpus': 4,
             'resources_available.foo_str': "foo,bar,buba"}
        self.server.create_vnodes('vnode', a, 4, self.mom)
        # Add resources to sched_config
        self.scheduler.add_resource("foo_str")
        # Create execjob_begin hook: assigns a different foo_str value
        # depending on which vnode the job lands on
        hook1 = """
import pbs
e = pbs.event()
j = e.job
if j.Resource_List["host"] == "vnode[0]":
    j.Resource_List["foo_str"] = "foo"
elif j.Resource_List["host"] == "vnode[1]":
    j.Resource_List["foo_str"] = "bar"
else:
    j.Resource_List["foo_str"] = "buba"
"""
        a = {'event': "execjob_begin", 'enabled': 'True'}
        self.server.create_import_hook("test", a, hook1)
        # Turn off the scheduling
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'False'})
        # Submit jobs pinned to specific vnodes
        a = {'Resource_List.select': "vnode=vnode[0]:ncpus=2"}
        jid1 = self.submit_jobs(2, a)
        a = {'Resource_List.select': "vnode=vnode[1]:ncpus=2"}
        jid2 = self.submit_jobs(2, a)
        a = {'Resource_List.select': "vnode=vnode[2]:ncpus=2"}
        jid3 = self.submit_jobs(2, a)
        # Turn on the scheduling
        self.server.manager(MGR_CMD_SET, SERVER,
                            {'scheduling': 'True'})
        # Three equivalence class for each string value
        # set by mom_hook
        self.scheduler.log_match("Number of job equivalence classes: 3",
                                 starttime=self.t)
  1032. def test_incr_decr(self):
  1033. """
  1034. Test for varying job equivalence class values
  1035. """
  1036. # Submit a job
  1037. j = Job(TEST_USER,
  1038. attrs={'Resource_List.select': '1:ncpus=8',
  1039. 'Resource_List.walltime': '20'})
  1040. jid1 = self.server.submit(j)
  1041. # One equivalance class
  1042. self.scheduler.log_match("Number of job equivalence classes: 1",
  1043. starttime=self.t)
  1044. # Submit another job
  1045. self.t = int(time.time())
  1046. j = Job(TEST_USER,
  1047. attrs={'Resource_List.select': '1:ncpus=8',
  1048. 'Resource_List.walltime': '30'})
  1049. jid2 = self.server.submit(j)
  1050. # Two equivalence classes
  1051. self.scheduler.log_match("Number of job equivalence classes: 2",
  1052. starttime=self.t)
  1053. # Submit another job
  1054. self.t = int(time.time())
  1055. j = Job(TEST_USER,
  1056. attrs={'Resource_List.select': '1:ncpus=8',
  1057. 'Resource_List.walltime': '40'})
  1058. jid3 = self.server.submit(j)
  1059. # Three equivalence classes
  1060. self.scheduler.log_match("Number of job equivalence classes: 3",
  1061. starttime=self.t)
  1062. # Delete job1
  1063. self.server.delete(jid1, wait='True')
  1064. # Rerun scheduling cycle
  1065. self.t = int(time.time())
  1066. self.server.manager(MGR_CMD_SET, SERVER,
  1067. {'scheduling': 'True'})
  1068. # Two equivalence classes
  1069. self.scheduler.log_match("Number of job equivalence classes: 2",
  1070. starttime=self.t)
  1071. # Delete job2
  1072. self.server.delete(jid2, wait='true')
  1073. # Rerun scheduling cycle
  1074. self.t = int(time.time())
  1075. self.server.manager(MGR_CMD_SET, SERVER,
  1076. {'scheduling': 'True'})
  1077. # One equivalence classes
  1078. self.scheduler.log_match("Number of job equivalence classes: 1",
  1079. starttime=self.t)
  1080. # Delete job3
  1081. self.server.delete(jid3, wait='true')
  1082. time.sleep(1) # adding delay to avoid race condition
  1083. # Rerun scheduling cycle
  1084. self.t = int(time.time())
  1085. self.server.manager(MGR_CMD_SET, SERVER,
  1086. {'scheduling': 'True'})
  1087. # No message for equivalence class
  1088. self.scheduler.log_match("Number of job equivalence classes",
  1089. starttime=self.t,
  1090. existence=False)
  1091. self.logger.info(
  1092. "Number of job equivalence classes message " +
  1093. "not present when there are no jobs as expected")
    def test_server_queue_limit(self):
        """
        Test with mix of hard and soft limits
        on resources for users and groups
        """
        # Create workq2
        self.server.manager(MGR_CMD_CREATE, QUEUE,
                            {'queue_type': 'e', 'started': 'True',
                             'enabled': 'True'}, id='workq2')
        # Set queue limit
        a = {
            'max_run': '[o:PBS_ALL=100],[g:PBS_GENERIC=20],\
            [u:PBS_GENERIC=20],[g:tstgrp01 = 8],[u:pbsuser1=10]'}
        self.server.manager(MGR_CMD_SET, QUEUE,
                            a, id='workq2')
        a = {'max_run_res.ncpus':
             '[o:PBS_ALL=100],[g:PBS_GENERIC=50],\
             [u:PBS_GENERIC=20],[g:tstgrp01=13],[u:pbsuser1=12]'}
        self.server.manager(MGR_CMD_SET, QUEUE, a, id='workq2')
        a = {'max_run_res_soft.ncpus':
             '[o:PBS_ALL=100],[g:PBS_GENERIC=30],\
             [u:PBS_GENERIC=10],[g:tstgrp01=10],[u:pbsuser1=10]'}
        self.server.manager(MGR_CMD_SET, QUEUE, a, id='workq2')
        # Create server limits
        a = {
            'max_run': '[o:PBS_ALL=100],[g:PBS_GENERIC=50],\
            [u:PBS_GENERIC=20],[g:tstgrp01=13],[u:pbsuser1=13]'}
        self.server.manager(MGR_CMD_SET, SERVER, a)
        a = {'max_run_soft':
             '[o:PBS_ALL=50],[g:PBS_GENERIC=25],[u:PBS_GENERIC=10],\
             [g:tstgrp01=10],[u:pbsuser1=10]'}
        self.server.manager(MGR_CMD_SET, SERVER, a)
        # Turn scheduling off
        self.server.manager(MGR_CMD_SET, SERVER, {'scheduling': 'false'})
        # Submit jobs as pbsuser1 from group tstgrp01 in workq2
        a = {'Resource_List.select': '1:ncpus=1',
             'group_list': TSTGRP1, ATTR_q: 'workq2'}
        jid1 = self.submit_jobs(10, a, TEST_USER1)
        # Submit jobs as pbsuser1 from group tstgrp02 in workq2
        a = {'Resource_List.select': '1:ncpus=1',
             'group_list': TSTGRP2, ATTR_q: 'workq2'}
        jid2 = self.submit_jobs(10, a, TEST_USER1)
        # Submit jobs as pbsuser2 from tstgrp01 in workq2
        a = {'Resource_List.select': '1:ncpus=1',
             'group_list': TSTGRP1, ATTR_q: 'workq2'}
        jid3 = self.submit_jobs(10, a, TEST_USER2)
        # Submit jobs as pbsuser2 from tstgrp03 in workq2
        a = {'Resource_List.select': '1:ncpus=1',
             'group_list': TSTGRP3, ATTR_q: 'workq2'}
        jid4 = self.submit_jobs(10, a, TEST_USER2)
        # Submit jobs as pbsuser1 from tstgrp01 in workq
        a = {'Resource_List.select': '1:ncpus=1',
             'group_list': TSTGRP1, ATTR_q: 'workq'}
        jid5 = self.submit_jobs(10, a, TEST_USER1)
        # Submit jobs as pbsuser1 from tstgrp02 in workq
        a = {'Resource_List.select': '1:ncpus=1',
             'group_list': TSTGRP2, ATTR_q: 'workq'}
        jid6 = self.submit_jobs(10, a, TEST_USER1)
        # Submit jobs as pbsuser2 from tstgrp01 in workq
        a = {'Resource_List.select': '1:ncpus=1',
             'group_list': TSTGRP1, ATTR_q: 'workq'}
        jid7 = self.submit_jobs(10, a, TEST_USER2)
        # Submit jobs as pbsuser2 from tstgrp03 in workq
        a = {'Resource_List.select': '1:ncpus=1',
             'group_list': TSTGRP3, ATTR_q: 'workq'}
        jid8 = self.submit_jobs(10, a, TEST_USER2)
        self.t = int(time.time())
        # Run only one cycle
        self.server.manager(MGR_CMD_SET, MGR_OBJ_SERVER,
                            {'scheduling': 'True'})
        self.server.manager(MGR_CMD_SET, MGR_OBJ_SERVER,
                            {'scheduling': 'False'})
        # Eight equivalence classes; one for each combination of
        # users and groups
        self.scheduler.log_match("Number of job equivalence classes: 8",
                                 starttime=self.t)
    def test_preemption(self):
        """
        Suspended jobs are placed into their own equivalence class. If
        they remain in the class they were in when they were queued, they
        can stop other jobs in that class from running.
        Equivalence classes are created in query-order. Test to see if
        suspended job which comes first in query-order is added to its own
        class.
        """
        a = {'resources_available.ncpus': 1}
        self.server.create_vnodes('vnode', a, 4, self.mom, usenatvnode=True)
        a = {'queue_type': 'e', 'started': 't',
             'enabled': 't', 'Priority': 150}
        self.server.manager(MGR_CMD_CREATE, QUEUE, a, id='expressq')
        (jid1, ) = self.submit_jobs(1)
        (jid2, ) = self.submit_jobs(1)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid1)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid2)
        # High-priority job needing 3 cpus forces a preemption
        a = {'Resource_List.ncpus': 3, 'queue': 'expressq'}
        (jid3,) = self.submit_jobs(1, a)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid3)
        # Make sure one of the job is suspended
        sus_job = self.server.select(attrib={'job_state': 'S'})
        self.assertEqual(len(sus_job), 1,
                         "Either no or more jobs are suspended")
        self.logger.info("Job %s is suspended" % sus_job[0])
        (jid4,) = self.submit_jobs(1)
        self.server.expect(JOB, 'comment', op=SET)
        self.server.expect(JOB, {'job_state': 'Q'}, id=jid4)
        # 3 equivalence classes: 1 for jid2 and jid4; 1 for jid3; and 1 for
        # jid1 by itself because it is suspended.
        self.scheduler.log_match("Number of job equivalence classes: 3",
                                 starttime=self.t)
        # Make sure suspended job is in its own class. If it is still in
        # jid4's class jid4 will not run. This is because suspended job
        # will be considered first and mark the entire class as can not run.
        if sus_job[0] == jid2:
            self.server.deljob(jid1, wait=True)
        else:
            self.server.deljob(jid2, wait=True)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid4)
    def test_preemption2(self):
        """
        Suspended jobs are placed into their own equivalence class. If
        they remain in the class they were in when they were queued, they
        can stop other jobs in that class from running.
        Equivalence classes are created in query-order. Test to see if
        suspended job which comes later in query-order is added to its own
        class instead of the class it was in when it was queued.
        """
        a = {'resources_available.ncpus': 1}
        self.server.create_vnodes('vnode', a, 4, self.mom, usenatvnode=True)
        a = {'queue_type': 'e', 'started': 't',
             'enabled': 't', 'Priority': 150}
        self.server.manager(MGR_CMD_CREATE, QUEUE, a, id='expressq')
        (jid1,) = self.submit_jobs(1)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid1)
        (jid2,) = self.submit_jobs(1)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid2)
        # Jobs most recently started are suspended first.
        # Sleep for a second to force jid3 to be suspended.
        time.sleep(1)
        (jid3,) = self.submit_jobs(1)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid3)
        # High-priority job needing 2 cpus preempts the newest job (jid3)
        a = {'Resource_List.ncpus': 2, 'queue': 'expressq'}
        (jid4,) = self.submit_jobs(1, a)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid1)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid2)
        self.server.expect(JOB, {'job_state': 'S'}, id=jid3)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid4)
        (jid5,) = self.submit_jobs(1)
        self.server.expect(JOB, 'comment', op=SET)
        self.server.expect(JOB, {'job_state': 'Q'}, id=jid5)
        # 3 equivalence classes: 1 for jid1, jid2, and jid5; 1 for jid4;
        # jid3 by itself because it is suspended.
        self.scheduler.log_match("Number of job equivalence classes: 3",
                                 starttime=self.t)
        # Make sure jid3 is in its own class. If it is still in jid5's class
        # jid5 will not run. This is because jid3 will be considered first
        # and mark the entire class as can not run.
        self.server.deljob(jid2, wait=True)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid5)
    def test_multiple_job_preemption_order(self):
        """
        Test that when multiple jobs from same eqivalence class are
        preempted in reverse order they were created in and they are placed
        into the same equivalence class
        2) Test that for jobs of same type, suspended job which comes
        later in query-order is in its own equivalence class, and can
        be picked up to run along with the queued job in
        the same scheduling cycle.
        """
        # Create 1 vnode with 3 ncpus
        a = {'resources_available.ncpus': 3}
        self.server.create_vnodes('vnode', a, 1, self.mom, usenatvnode=True)
        # Create expressq
        a = {'queue_type': 'execution', 'started': 'true',
             'enabled': 'true', 'Priority': 150}
        self.server.manager(MGR_CMD_CREATE, QUEUE, a, id='expressq')
        # Submit 3 jobs with delay of 1 sec
        # Delay of 1 sec will preempt jid3 and then jid2.
        a = {'Resource_List.ncpus': 1}
        J = Job(TEST_USER, attrs=a)
        jid1 = self.server.submit(J)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid1)
        time.sleep(1)
        J2 = Job(TEST_USER, attrs=a)
        jid2 = self.server.submit(J2)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid2)
        time.sleep(1)
        J3 = Job(TEST_USER, attrs=a)
        jid3 = self.server.submit(J3)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid3)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid1)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid2)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid3)
        # Preempt jid3 with expressq, check 1 equivalence class is created
        a = {'Resource_List.ncpus': 1, 'queue': 'expressq'}
        Je = Job(TEST_USER, attrs=a)
        jid4 = self.server.submit(Je)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid1)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid2)
        self.server.expect(JOB, {'job_state': 'S'}, id=jid3)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid4)
        self.scheduler.log_match("Number of job equivalence classes: 2",
                                 starttime=self.t)
        self.t = int(time.time())
        # Preempt jid2, check no new equivalence class is created
        Je2 = Job(TEST_USER, attrs=a)
        jid5 = self.server.submit(Je2)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid1)
        self.server.expect(JOB, {'job_state': 'S'}, id=jid2)
        self.server.expect(JOB, {'job_state': 'S'}, id=jid3)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid4)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid5)
        # Only One equivalence class for jid2 and jid3 is present since both
        # suspended jobs are of same type and running on same vnode
        self.scheduler.log_match("Number of job equivalence classes: 2",
                                 starttime=self.t)
        # Add a job to Queue state
        a = {'Resource_List.ncpus': 1}
        J = Job(TEST_USER, attrs=a)
        jid6 = self.server.submit(J)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid1)
        self.server.expect(JOB, {'job_state': 'S'}, id=jid2)
        self.server.expect(JOB, {'job_state': 'S'}, id=jid3)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid4)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid5)
        self.server.expect(JOB, {'job_state': 'Q'}, id=jid6)
        # Set scheduling to false before deleting jobs to free nodes, so that
        # suspended and queued jobs do not run. These jobs will be picked up
        # in the next scheduling cycle when scheduling is again set to true
        self.server.manager(MGR_CMD_SET, MGR_OBJ_SERVER,
                            {'scheduling': 'False'})
        # Delete one running, one suspended job and one of high priority job
        # This will leave 2 free nodes to pick up the suspended and queued job
        self.server.deljob([jid1, jid2, jid5])
        # if we use deljob(wait=True) starts the scheduling cycle if job
        # takes more time to be deleted.
        # The for loop below is to check that the jobs have been deleted
        # without kicking off a new scheduling cycle.
        deleted = False
        for _ in range(20):
            workq_dict = self.server.status(QUEUE, id='workq')[0]
            expressq_dict = self.server.status(QUEUE, id='expressq')[0]
            if workq_dict['total_jobs'] == '2'\
                    and expressq_dict['total_jobs'] == '1':
                deleted = True
                break
            else:
                # jobs take longer than one second to delete, use two seconds
                time.sleep(2)
        self.assertTrue(deleted)
        self.server.manager(MGR_CMD_SET, MGR_OBJ_SERVER,
                            {'scheduling': 'True'})
        self.server.expect(JOB, {'job_state': 'R'}, id=jid3)
        self.server.expect(JOB, {'job_state': 'R'}, id=jid6)
  1347. def test_multiple_equivalence_class_preemption(self):
  1348. """
  1349. This test is to test that -
  1350. 1) Suspended jobs of different types go to different equiv classes
  1351. 2) Different types of jobs suspended by qsig signal
  1352. go to different equivalence classes
  1353. 3) Jobs of same type and same node on suspension by qsig
  1354. or preemption go to same equivalence classes
  1355. 4) Same type of suspended jobs, when resumed after qsig
  1356. and jobs suspended by preemption both go to same equivalence classes
  1357. """
  1358. # Create vnode with 4 ncpus
  1359. a = {'resources_available.ncpus': 4}
  1360. self.server.create_vnodes('vnode', a, 1, self.mom, usenatvnode=True)
  1361. # Create a expressq
  1362. a = {'queue_type': 'execution', 'started': 'true',
  1363. 'enabled': 'true', 'Priority': 150}
  1364. self.server.manager(MGR_CMD_CREATE, QUEUE, a, id='expressq')
  1365. # Submit regular job
  1366. a = {'Resource_List.ncpus': 1}
  1367. (jid1, jid2) = self.submit_jobs(2, a)
  1368. # Submit a job with walltime
  1369. a2 = {'Resource_List.ncpus': 1, 'Resource_List.walltime': 600}
  1370. (jid3, jid4) = self.submit_jobs(2, a2)
  1371. self.scheduler.log_match("Number of job equivalence classes: 2",
  1372. starttime=self.t)
  1373. self.server.expect(JOB, {'job_state': 'R'}, id=jid1)
  1374. self.server.expect(JOB, {'job_state': 'R'}, id=jid2)
  1375. self.server.expect(JOB, {'job_state': 'R'}, id=jid3)
  1376. self.server.expect(JOB, {'job_state': 'R'}, id=jid4)
  1377. # Suspend 1 job from each equivalence class
  1378. self.server.sigjob(jobid=jid1, signal="suspend")
  1379. self.server.sigjob(jobid=jid3, signal="suspend")
  1380. self.server.expect(JOB, {'job_state': 'S'}, id=jid1)
  1381. self.server.expect(JOB, {'job_state': 'R'}, id=jid2)
  1382. self.server.expect(JOB, {'job_state': 'S'}, id=jid3)
  1383. self.server.expect(JOB, {'job_state': 'R'}, id=jid4)
  1384. # Check that both suspended jobs go to different equivalence class
  1385. # 1 for jid1, 1 for jid2, 1 for jid3, and 1 for jid4
  1386. self.scheduler.log_match("Number of job equivalence classes: 4",
  1387. starttime=self.t)
  1388. # Start a high priority job to preempt jid 2 and jid4
  1389. a = {'Resource_List.ncpus': 4, 'queue': 'expressq'}
  1390. Je = Job(TEST_USER, attrs=a)
  1391. jid5 = self.server.submit(Je)
  1392. self.server.expect(JOB, {'job_state': 'S'}, id=jid1)
  1393. self.server.expect(JOB, {'job_state': 'S'}, id=jid2)
  1394. self.server.expect(JOB, {'job_state': 'S'}, id=jid3)
  1395. self.server.expect(JOB, {'job_state': 'S'}, id=jid4)
  1396. self.server.expect(JOB, {'job_state': 'R'}, id=jid5)
  1397. # Check only 3 equivalence class are present,
  1398. # i.e 1 equivalence class for jid1 and jid2,1 equivalence class
  1399. # for jid3 and jid4 and 1 equivalence class for jid5
  1400. self.scheduler.log_match("Number of job equivalence classes: 3",
  1401. starttime=self.t)
  1402. self.t = int(time.time())
  1403. # Resume the jobs suspended by qsig
  1404. # 1 second delay is added so that time of next logging moves ahead.
  1405. # This will make sure log_match does not take previous entry.
  1406. time.sleep(1)
  1407. self.server.sigjob(jobid=jid1, signal="resume")
  1408. self.server.sigjob(jobid=jid3, signal="resume")
  1409. # On resume check that there are same number of equivalence classes
  1410. self.scheduler.log_match("Number of job equivalence classes: 3",
  1411. starttime=self.t)
  1412. self.t = int(time.time())
  1413. # delete the expressq jobs and check that the suspended jobs
  1414. # go back to running state. equivalence classes=2 again
  1415. self.server.deljob(jid5, wait=True)
  1416. self.server.expect(JOB, {'job_state': 'R'}, id=jid1)
  1417. self.server.expect(JOB, {'job_state': 'R'}, id=jid2)
  1418. self.server.expect(JOB, {'job_state': 'R'}, id=jid3)
  1419. self.server.expect(JOB, {'job_state': 'R'}, id=jid4)
  1420. # Check equivalence classes =2
  1421. self.scheduler.log_match("Number of job equivalence classes: 2",
  1422. starttime=self.t)
  1423. def test_held_jobs_equiv_class(self):
  1424. """
  1425. 1) Test that held jobs do not go into another equivalence class.
  1426. 2) Running jobs do not go into a seperate equivalence class
  1427. """
  1428. a = {'resources_available.ncpus': 1}
  1429. self.server.create_vnodes('vnode', a, 1, self.mom, usenatvnode=True)
  1430. a = {'Resource_List.select': '1:ncpus=1', ATTR_h: None}
  1431. J1 = Job(TEST_USER, attrs=a)
  1432. jid1 = self.server.submit(J1)
  1433. a = {'Resource_List.select': '1:ncpus=1'}
  1434. J2 = Job(TEST_USER, attrs=a)
  1435. jid2 = self.server.submit(J2)
  1436. self.server.expect(JOB, {'job_state': 'H'}, id=jid1)
  1437. self.server.expect(JOB, {'job_state': 'R'}, id=jid2)
  1438. self.scheduler.log_match("Number of job equivalence classes: 1",
  1439. starttime=self.t)
  1440. def test_queue_resav(self):
  1441. """
  1442. Test that jobs in queues with resources_available limits use queue as
  1443. part of the criteria of making an equivalence class
  1444. """
  1445. a = {'resources_available.ncpus': 2}
  1446. self.server.create_vnodes('vnode', a, 1, self.mom, usenatvnode=True)
  1447. attrs = {'queue_type': 'Execution', 'started': 'True',
  1448. 'enabled': 'True', 'resources_available.ncpus': 1,
  1449. 'Priority': 10}
  1450. self.server.manager(MGR_CMD_CREATE, QUEUE, attrs, id='workq2')
  1451. self.server.manager(MGR_CMD_SET, SERVER, {'scheduling': 'False'})
  1452. a = {'queue': 'workq', 'Resource_List.select': '1:ncpus=1'}
  1453. a2 = {'queue': 'workq2', 'Resource_List.select': '1:ncpus=1'}
  1454. J = Job(TEST_USER, attrs=a)
  1455. jid1 = self.server.submit(J)
  1456. J = Job(TEST_USER, attrs=a2)
  1457. jid2 = self.server.submit(J)
  1458. J = Job(TEST_USER, attrs=a2)
  1459. jid3 = self.server.submit(J)
  1460. self.server.manager(MGR_CMD_SET, SERVER, {'scheduling': 'True'})
  1461. self.server.expect(JOB, {ATTR_state: 'R'}, id=jid1)
  1462. self.server.expect(JOB, {ATTR_state: 'R'}, id=jid2)
  1463. self.server.expect(JOB, {ATTR_state: 'Q'}, id=jid3)
  1464. # 2 quivalence classes - one for jobs inside workq2
  1465. # and one for jobs inside workq
  1466. self.scheduler.log_match("Number of job equivalence classes: 2",
  1467. starttime=self.t)
  1468. def test_overlap_resv(self):
  1469. """
  1470. Test that 2 overlapping reservation creates 2 different
  1471. equivalence classes
  1472. """
  1473. # Submit a reservation
  1474. a = {'Resource_List.select': '1:ncpus=1',
  1475. 'reserve_start': int(time.time()) + 20,
  1476. 'reserve_end': int(time.time()) + 300, }
  1477. r1 = Reservation(TEST_USER, a)
  1478. rid1 = self.server.submit(r1)
  1479. r2 = Reservation(TEST_USER, a)
  1480. rid2 = self.server.submit(r2)
  1481. a = {'reserve_state': (MATCH_RE, "RESV_CONFIRMED|2")}
  1482. self.server.expect(RESV, a, id=rid1)
  1483. self.server.expect(RESV, a, id=rid2)
  1484. r1name = rid1.split('.')
  1485. r2name = rid2.split('.')
  1486. a = {ATTR_queue: r1name[0], 'Resource_List.select': '1:ncpus=1'}
  1487. j1 = Job(TEST_USER, a)
  1488. jid1 = self.server.submit(j1)
  1489. self.server.expect(JOB, 'comment', op=SET, id=jid1)
  1490. self.server.expect(JOB, {'job_state': 'Q'}, id=jid1)
  1491. j2 = Job(TEST_USER, a)
  1492. jid2 = self.server.submit(j2)
  1493. self.server.expect(JOB, 'comment', op=SET, id=jid2)
  1494. self.server.expect(JOB, {'job_state': 'Q'}, id=jid2)
  1495. a = {ATTR_queue: r2name[0], 'Resource_List.select': '1:ncpus=1'}
  1496. j3 = Job(TEST_USER, a)
  1497. jid3 = self.server.submit(j3)
  1498. self.server.expect(JOB, 'comment', op=SET, id=jid3)
  1499. self.server.expect(JOB, {'job_state': 'Q'}, id=jid3)
  1500. j4 = Job(TEST_USER, a)
  1501. jid4 = self.server.submit(j4)
  1502. self.server.expect(JOB, 'comment', op=SET, id=jid4)
  1503. self.server.expect(JOB, {'job_state': 'Q'}, id=jid4)
  1504. # Wait for reservation to start
  1505. self.server.expect(RESV, {'reserve_state=RESV_RUNNING': 2}, offset=20)
  1506. # Verify that equivalence class is 2; one for
  1507. # each reservation queue
  1508. self.scheduler.log_match("Number of job equivalence classes: 2",
  1509. starttime=self.t)
  1510. # Verify that one job from R1 is running and
  1511. # one job from R2 is running
  1512. self.server.expect(JOB, {"job_state": 'R'}, id=jid1)
  1513. self.server.expect(JOB, {"job_state": 'R'}, id=jid3)
  1514. def test_limit_res(self):
  1515. """
  1516. Test when resources are being limited on, but those resources are not
  1517. in the sched_config resources line. Jobs requesting these resources
  1518. should be split into their own equivalence classes.
  1519. """
  1520. a = {ATTR_RESC_TYPE: 'long'}
  1521. self.server.manager(MGR_CMD_CREATE, RSC, a, id='foores')
  1522. a = {'max_run_res.foores': '[u:PBS_GENERIC=4]'}
  1523. self.server.manager(MGR_CMD_SET, SERVER, a)
  1524. a = {'Resource_List.foores': 1, 'Resource_List.select': '1:ncpus=1'}
  1525. self.submit_jobs(2, a)
  1526. a['Resource_List.foores'] = 2
  1527. (_, jid4) = self.submit_jobs(2, a)
  1528. self.server.expect(JOB, {'job_state=R': 3})
  1529. self.server.expect(JOB, 'comment', op=SET, id=jid4)
  1530. self.server.expect(JOB, {'job_state': 'Q'}, id=jid4)
  1531. (jid5, ) = self.submit_jobs(1)
  1532. self.server.expect(JOB, {'job_state': 'R'}, id=jid5)
  1533. # Verify that equivalence class is 3; one for
  1534. # foores=1 and one for foores=2 and
  1535. # one for no foores
  1536. self.scheduler.log_match("Number of job equivalence classes: 3",
  1537. starttime=self.t)