gomod: change projectcalico/calico to kubesphere/calico (#5557)

* chore(calico): update calico to 3.25.0

* chore(calico): replace projectcalico/calico to kubesphere/calico

Signed-off-by: root <renyunkang@kubesphere.io>

---------

Signed-off-by: root <renyunkang@kubesphere.io>
This commit is contained in:
Yunkang Ren
2023-02-28 17:03:36 +08:00
committed by GitHub
parent dc28a0917a
commit a3a6a1cd98
146 changed files with 11189 additions and 4663 deletions

557
vendor/github.com/projectcalico/calico/AUTHORS.md generated vendored Normal file
View File

@@ -0,0 +1,557 @@
# Calico authors
This file is auto-generated based on contribution records reported
by GitHub for the core repositories within the projectcalico/ organization. It is ordered alphabetically.
| Name | Email |
|--------|--------|
| Aalaesar | aalaesar@gmail.com |
| Aaron Roydhouse | aaron@roydhouse.com |
| Abhijeet Kasurde | akasurde@redhat.com |
| Abhinav Dahiya | abhinav.dahiya@coreos.com |
| Abhishek Jaisingh | abhi2254015@gmail.com |
| Adam Hoheisel | adam.hoheisel99@gmail.com |
| Adam Leskis | leskis@gmail.com |
| Adam Szecówka | adam.szecowka@sap.com |
| ahrkrak | andrew.randall@gmail.com |
| Alan | zg.zhu@daocloud.io |
| Alban Crequy | alban@kinvolk.io |
| Albert Vaca | albertvaka@gmail.com |
| Alejo Carballude | alejocarballude@gmail.com |
| Aleksandr Didenko | adidenko@mirantis.com |
| Aleksandr Dubinsky | almson@users.noreply.github.com |
| Alessandro Rossi | 4215912+kubealex@users.noreply.github.com |
| Alex Altair | alexanderaltair@gmail.com |
| Alex Chan | github@alexwlchan.fastmail.co.uk |
| Alex Hersh | alexander.hersh@metaswitch.com |
| Alex Nauda | alex@alexnauda.com |
| Alex O Regan | alexsoregan@gmail.com |
| Alex Pollitt | lxpollitt@users.noreply.github.com |
| Alex Rowley | rowleyaj@gmail.com |
| Alexander Brand | alexbrand09@gmail.com |
| Alexander Gama Espinosa | algamaes@microsoft.com |
| Alexander Golovko | alexandro@ankalagon.ru |
| Alexander Saprykin | asaprykin@mirantis.com |
| Alexander Varshavsky | alex.varshavsky@tigera.io |
| Alexey Magdich | itechart.aliaksei.mahdzich@tigera.io |
| Alexey Makhov | makhov.alex@gmail.com |
| Alexey Medvedchikov | alexey.medvedchikov@gmail.com |
| alexeymagdich-tigera | 56426143+alexeymagdich-tigera@users.noreply.github.com |
| alexhersh | hersh.a@husky.neu.edu |
| Alina Militaru | alina@tigera.io |
| Aloys Augustin | aloaugus@cisco.com |
| Aloÿs | aloys.augustin@polytechnique.org |
| Amim Knabben | amim.knabben@gmail.com |
| amq | amq@users.noreply.github.com |
| Anatoly Popov | aensidhe@users.noreply.github.com |
| Anders Janmyr | anders@janmyr.com |
| Andreas Jaeger | aj@suse.com |
| Andrei Nistor | andrei_nistor@smart-x.net |
| Andrew Donald Kennedy | andrew.international@gmail.com |
| Andrew Gaffney | andrew@agaffney.org |
| Andy Randall | andy@tigera.io |
| Anthony ARNAUD | aarnaud@eidosmontreal.com |
| Anthony BESCOND | anthony.bescond@kiln.fi |
| Anthony T | 25327116+anthonytwh@users.noreply.github.com |
| Anton Antonov | anton.synd.antonov@gmail.com |
| Anton Klokau | anton.klokau@gmail.com |
| anton-klokau | 54411589+anton-klokau@users.noreply.github.com |
| Antony Guinard | antony@tigera.io |
| Aram Alipoor | aram.alipoor@gmail.com |
| arikachen | eaglesora@gmail.com |
| Armon Dadgar | armon.dadgar@gmail.com |
| Artem Panchenko | apanchenko@mirantis.com |
| Artem Roma | aroma@mirantis.com |
| Artem Rymarchik | artemrymarchik@gmail.com |
| Artyom Rymarchik | artsiom.rymarchyk@itechart-group.com |
| Arundhati Surpur | arundhati@nectechnologies.in |
| Ashley Reese | ashley@victorianfox.com |
| asincu | alinamilitaru@Alinas-MacBook-Pro.local |
| Atkins | atkinschang@gmail.com |
| Avi Deitcher | avi@deitcher.net |
| Ayoub Elhamdani | a.elhamdani90@gmail.com |
| Barbara McKercher | barbara@tigera.io |
| bartek-lopatka | 54111388+bartek-lopatka@users.noreply.github.com |
| Bassam Tabbara | bassam@symform.com |
| Behnam Shobiri | behnam.shobeiri@gmail.com |
| Behnam-Shobiri | Behnam.shobeiri@gmail.com |
| Ben Schwartz | benschw@gmail.com |
| Benjamin | info@diffus.org |
| Benjamin S. Allen | bsallen@alcf.anl.gov |
| Bertrand Lallau | bertrand.lallau@gmail.com |
| Bill Hathaway | bill.hathaway@gmail.com |
| Bill Maxwell | bill@rancher.com |
| Billie Cleek | bcleek@monsooncommerce.com |
| bingshen.wbs | bingshen.wbs@alibaba-inc.com |
| bjhaid | abejideayodele@gmail.com |
| Blake Covarrubias | blake.covarrubias@gmail.com |
| Blucher | yfg44fox@126.com |
| bmckercher123 | 48458529+bmckercher123@users.noreply.github.com |
| Bogdan Dobrelya | bdobrelia@mirantis.com |
| Brad Beam | brad.beam@b-rad.info |
| Brad Behle | behle@us.ibm.com |
| Brendan Creane | brendan@tigera.io |
| Brian Ketelsen | bketelsen@gmail.com |
| Brian Kim | brian@tigera.io |
| Brian McMahon | brianmcmahon135@gmail.com |
| briansan | bkimstunnaboss@gmail.com |
| Brook-Roberts | brook.roberts@metaswitch.com |
| Bryan Reese | bryan.mreese@gmail.com |
| Cao Shufeng | caosf.fnst@cn.fujitsu.com |
| Cao Xuan Hoang | hoangcx@vn.fujitsu.com |
| Carlos Alberto | euprogramador@gmail.com |
| Casey D | casey.davenport@metaswitch.com |
| Casey Davenport | davenport.cas@gmail.com |
| Cezar Sa Espinola | cezarsa@gmail.com |
| Chakravarthy Gopi | cgopi@us.ibm.com |
| Chance Zibolski | chance.zibolski@gmail.com |
| Chen Donghui | chendh521@gmail.com |
| Chengwei Yang | yangchengwei@qiyi.com |
| chenqijun | chenqijun@corp.netease.com |
| Chris Armstrong | chris@opdemand.com |
| Chris Church | chris.church@gmail.com |
| Chris Hoge | chris@hogepodge.com |
| Chris McNabb | raizyr@gmail.com |
| Chris Tomkins | chris.tomkins@tigera.io |
| Christian Klauser | christianklauser@outlook.com |
| Christian Simon | simon@swine.de |
| Christopher | chris.tauchen@tigera.io |
| Christopher Grim | christopher.grim@gmail.com |
| Christopher LIJLENSTOLPE | github@cdl.asgaard.org |
| Christopher LILJENSTOLPE | cdl@asgaard.org |
| cinience | cinience@qq.com |
| Ciprian Hacman | ciprian@hakman.dev |
| Clement Laforet | sheepkiller@cotds.org |
| Cody McCain | cody@tigera.io |
| Cookie | luckymrwang@163.com |
| Cory Benfield | lukasaoz@gmail.com |
| crandl201 | christopher_randles@cable.comcast.com |
| Cristian Vrabie | cristian.vrabie@gmail.com |
| Cyclinder | qifeng.guo@daocloud.io |
| Dalton Hubble | dghubble@gmail.com |
| Dan | djosborne@users.noreply.github.com |
| Dan (Turk) | dan@projectcalico.org |
| Dan Bond | pm@danbond.io |
| Dan O'Brien | dobrien.nj@gmail.com |
| Dan Osborne | djosborne10@gmail.com |
| Daniel Hoherd | daniel.hoherd@gmail.com |
| Daniel Megyesi | daniel.megyesi@liligo.com |
| Dario Nieuwenhuis | dirbaio@dirbaio.net |
| Darren Chin | dc@darrench.in |
| Dave Hay | david_hay@uk.ibm.com |
| Dave Langridge | dave@calico.com |
| David Haupt | dhaupt@redhat.com |
| David Igou | igou.david@gmail.com |
| David J. Wilder | wilder@us.ibm.com |
| David Tesar | david.tesar@microsoft.com |
| Denis Iskandarov | d.iskandarov@gmail.com |
| depay | depay19@163.com |
| derek mcquay | derek@tigera.io |
| Derk Muenchhausen | derk@muenchhausen.de |
| Didier Durand | durand.didier@gmail.com |
| Dominic DeMarco | ddemarc@us.ibm.com |
| Doug Collier | doug@tigera.io |
| Doug Davis | duglin@users.noreply.github.com |
| Doug Hellmann | doug@doughellmann.com |
| Doug Wiegley | dwiegley@salesforce.com |
| Dries Harnie | dries+github@harnie.be |
| du | du@njtech.edu.cn |
| Duan Jiong | djduanjiong@gmail.com |
| Duong Ha-Quang | duonghq@vn.fujitsu.com |
| Dylan Pindur | dylanpindur@gmail.com |
| Ed Harrison | eepyaich@users.noreply.github.com |
| Edbert | ecandra@protonmail.com |
| Elson Rodriguez | elson.rodriguez@gmail.com |
| emanic | emily@tigera.io |
| Emma Gordon | emma@projectcalico.org |
| EmmEff | mikef17@gmail.com |
| Eran Reshef | eran.reshef@arm.com |
| Eric Anderson | anderson@stackengine.com |
| Eric Barch | ericb@ericbarch.com |
| Eric Hoffmann | 31017077+2ffs2nns@users.noreply.github.com |
| Erik Stidham | estidham@gmail.com |
| Ernest Wong | chuwon@microsoft.com |
| Ernesto Jiménez | me@ernesto-jimenez.com |
| Ethan Chu | xychu2008@gmail.com |
| Eugen Mayer | 136934+EugenMayer@users.noreply.github.com |
| F41gh7 | info@fght.net |
| Fabian Ruff | fabian@progra.de |
| Fahad Arshad | fahadaliarshad@gmail.com |
| fcuello-fudo | 51087976+fcuello-fudo@users.noreply.github.com |
| Feilong Wang | flwang@catalyst.net.nz |
| fen4o | martin.vladev@gmail.com |
| Fernando Alvarez | methadato@gmail.com |
| Fernando Cainelli | fernando.cainelli@gmail.com |
| Fionera | fionera@fionera.de |
| Flavio Percoco | flaper87@gmail.com |
| Foivos Filippopoulos | foivosfilip@gmail.com |
| frank | frank@tigera.io |
| Frank Greco Jr | frankgreco@northwesternmutual.com |
| François PICOT | fpicot@users.noreply.github.com |
| Fredrik Steen | stone4x4@gmail.com |
| freecaykes | edbert@tigera.io |
| frnkdny | frank.danyo@gmail.com |
| fumihiko kakuma | kakuma@valinux.co.jp |
| Gabriel Monroy | gabriel@opdemand.com |
| Gaurav | 48036489+realgaurav@users.noreply.github.com |
| Gaurav Khatri | gaurav@tigera.io |
| Gaurav Sinha | gaurav.sinha@tigera.io |
| Gautam K | gautam.nitheesh@gmail.com |
| gdziwoki | gdziwoki@gmail.com |
| gengchc2 | geng.changcai2@zte.com.cn |
| Gerard Hickey | hickey@kinetic-compute.com |
| Giancarlo Rubio | gianrubio@gmail.com |
| Gianluca | 52940363+gianlucam76@users.noreply.github.com |
| Gianluca Mardente | gianluca@tigera.io |
| Gobinath Krishnamoorthy | gobinath@tigera.io |
| Guang Ya Liu | gyliu513@gmail.com |
| Guangming Wang | guangming.wang@daocloud.io |
| Guillaume LECERF | glecerf@gmail.com |
| guirish | guirish |
| gunboe | guntherboeckmann@gmail.com |
| Gunjan "Grass-fed Rabbit" Patel | patelgunjan5@gmail.com |
| GuyTempleton | guy.templeton@skyscanner.net |
| Hagen Kuehn | hagen.kuehn@quater.io |
| halfcrazy | hackzhuyan@gmail.com |
| Hanamantagoud | hanamantagoud.v.kandagal@est.tech |
| hanamantagoudvk | 68010010+hanamantagoudvk@users.noreply.github.com |
| hedi bouattour | hbouatto@cisco.com |
| Helen Chang | c6h3un@gmail.com |
| Henry Gessau | gessau@gmail.com |
| huang.zhiping | huang.zhiping@99cloud.net |
| Huanle Han | hanhuanle@caicloud.io |
| Hui Kang | kangh@us.ibm.com |
| Huo Qi Feng | huoqif@cn.ibm.com |
| Iago López Galeiras | iago@kinvolk.io |
| ialidzhikov | i.alidjikov@gmail.com |
| Ian Wienand | iwienand@redhat.com |
| Icarus9913 | icaruswu66@qq.com |
| Igor Kapkov | igasgeek@me.com |
| Ihar Hrachyshka | ihrachys@redhat.com |
| ijumps | bigerjump@gmail.com |
| ISHIDA Wataru | ishida.wataru@lab.ntt.co.jp |
| Ivar Larsson | ivar@bloglovin.com |
| IWAMOTO Toshihiro | iwamoto@valinux.co.jp |
| J. Grizzard | jgrizzard@box.com |
| Jack Kleeman | jackkleeman@gmail.com |
| Jacob Hayes | jacob.r.hayes@gmail.com |
| Jade Chunnananda | jade.jch@gmail.com |
| Jak | 44370243+jak-sdk@users.noreply.github.com |
| James E. Blair | jeblair@redhat.com |
| James Lucktaylor | jlucktay@users.noreply.github.com |
| James Pollard | james@leapyear.io |
| James Sturtevant | jsturtevant@gmail.com |
| Jamie | 91jme@users.noreply.github.com |
| Jan Brauer | jan@jimdo.com |
| Jan Ivar Beddari | code@beddari.net |
| janonymous | janonymous.codevulture@gmail.com |
| jay vyas | jvyas@vmware.com |
| Jean-Sebastien Mouret | js.mouret@gmail.com |
| Jeff Schroeder | jeffschroeder@computer.org |
| Jenkins | jenkins@review.openstack.org |
| Jens Henrik Hertz | jens@treatwell.nl |
| Jesper Dangaard Brouer | brouer@redhat.com |
| Jiawei Huang | jiawei@tigera.io |
| Jimmy McCrory | jimmy.mccrory@gmail.com |
| jinglinax@163.com | jinglinax@163.com |
| jmjoy | 918734043@qq.com |
| Joanna Solmon | joanna.solmon@gmail.com |
| Joel Bastos | kintoandar@users.noreply.github.com |
| Johan Fleury | jfleury+github@arcaik.net |
| Johannes M. Scheuermann | joh.scheuer@gmail.com |
| Johannes Scheerer | johannes.scheerer@sap.com |
| johanneswuerbach | johannes.wuerbach@googlemail.com |
| John Engelman | john.r.engelman@gmail.com |
| jolestar | jolestar@gmail.com |
| Jonah Back | jonah@jonahback.com |
| Jonathan Boulle | jonathanboulle@gmail.com |
| Jonathan M. Wilbur | jonathan@wilbur.space |
| Jonathan Palardy | jonathan.palardy@gmail.com |
| Jonathan Sabo | jonathan@sabo.io |
| Jonathan Sokolowski | jonathan.sokolowski@gmail.com |
| jose-bigio | jose.bigio@docker.com |
| Joseph Gu | aceralon@outlook.com |
| Josh Conant | deathbeforedishes@gmail.com |
| Josh Lucas | josh.lucas@tigera.io |
| joshti | 56737865+joshti@users.noreply.github.com |
| Joshua Allard | josh@tigera.io |
| joshuactm | joshua.colvin@ticketmaster.com |
| Julien Dehee | PrFalken@users.noreply.github.com |
| Jussi Nummelin | jussi.nummelin@digia.com |
| Justin | justin@tigera.io |
| Justin Burnham | justin@jburnham.net |
| Justin Cattle | j@ocado.com |
| Justin Nauman | justin.r.nauman+github@gmail.com |
| Justin Pacheco | jpacheco39@bloomberg.net |
| Justin Sievenpiper | justin@sievenpiper.co |
| JW Bell | bjwbell@gmail.com |
| Kamil Madac | kamil.madac@gmail.com |
| Karl Matthias | karl.matthias@gonitro.com |
| Karthik Gaekwad | karthik.gaekwad@gmail.com |
| Karthik Krishnan Ramasubramanian | mail@karthikkrishnan.me |
| Kashif Saadat | kashifsaadat@gmail.com |
| Kelsey Hightower | kelsey.hightower@gmail.com |
| Ketan Kulkarni | ketkulka@gmail.com |
| Kevin Benton | blak111@gmail.com |
| Kevin Lynch | klynch@gmail.com |
| Kiran Divekar | calsoft.kiran.divekar@tigera.io |
| Kirill Buev | kirill.buev@pm.me |
| Kris Gambirazzi | kris.gambirazzi@transferwise.com |
| Krzesimir Nowak | krzesimir@kinvolk.io |
| Krzysztof Cieplucha | krisiasty@users.noreply.github.com |
| l1b0k | libokang.dev@gmail.com |
| Lance Robson | lancelot.robson@gmail.com |
| Lancelot Robson | lancelot.robson@metaswitch.com |
| Lars Ekman | lars.g.ekman@est.tech |
| Laurence Man | laurence@tigera.io |
| Le Hou | houl7@chinaunicom.cn |
| Lee Briggs | lbriggs@apptio.com |
| Leo Ochoa | leo8a@users.noreply.github.com |
| Li-zhigang | li.zhigang3@zte.com.cn |
| libby kent | viskcode@gmail.com |
| lilintan | lintan.li@easystack.cn |
| LinYushen | linyushen@qiniu.com |
| lippertmarkus | lippertmarkus@gmx.de |
| LittleBoy18 | 2283985296@qq.com |
| liubog2008 | liubog2008@gmail.com |
| Liz Rice | liz@lizrice.com |
| llr | nightmeng@gmail.com |
| Logan Davis | 38335829+logand22@users.noreply.github.com |
| Logan V | logan2211@gmail.com |
| lou-lan | loulan@loulan.me |
| Luiz Filho | luizbafilho@gmail.com |
| Luke Mino-Altherr | luke.mino-altherr@metaswitch.com |
| luobily | luobily@gmail.com |
| Luthfi Anandra | luthfi.anandra@gmail.com |
| Lv Jiawei | lvjiawei@cmss.chinamobile.com |
| maao | maao@cmss.chinamobile.com |
| Manjunath A Kumatagi | mkumatag@in.ibm.com |
| Manuel Buil | mbuil@suse.com |
| Marga Millet | marga.sfo@gmail.com |
| Marius Grigaitis | marius.grigaitis@home24.de |
| Mark Fermor | markfermor@holidayextras.com |
| Mark Petrovic | mspetrovic@gmail.com |
| markruler | csu0414@gmail.com |
| Marlin Cremers | marlinc@marlinc.nl |
| Marshall Ford | inbox@marshallford.me |
| Martijn Koster | mak-github@greenhills.co.uk |
| Martin Evgeniev | suizman@users.noreply.github.com |
| marvin-tigera | marvin-tigera@users.noreply.github.com |
| Mat Meredith | matthew.meredith@metaswitch.net |
| Mateusz Gozdek | mgozdek@microsoft.com |
| Mathias Lafeldt | mathias.lafeldt@gmail.com |
| Matt | matt@projectcalico.org |
| Matt Boersma | matt@opdemand.com |
| Matt Dupre | matthewdupre@users.noreply.github.com |
| Matt Kelly | Matthew.Joseph.Kelly@gmail.com |
| Matt Leung | mleung975@gmail.com |
| Matthew | mfisher@engineyard.com |
| Matthew Fenwick | mfenwick100@gmail.com |
| Matthew Fisher | matthewf@opdemand.com |
| Max Kudosh | max_kudosh@hotmail.com |
| Max S | maxstr@users.noreply.github.com |
| Max Stritzinger | mstritzinger@bloomberg.net |
| Maxim Ivanov | ivanov.maxim@gmail.com |
| Maximilian Bischoff | maximilian.bischoff@inovex.de |
| Mayo | mayocream39@yahoo.co.jp |
| Mazdak Nasab | mazdak@tigera.io |
| mchtech | michu_an@126.com |
| meeee | michael+git@frister.net |
| meijin | meijin@tiduyun.com |
| melissaml | ma.lei@99cloud.net |
| Michael Dong | michael.dong@vrviu.com |
| Michael Stowe | me@mikestowe.com |
| Michael Vierling | michael@tigera.io |
| Micheal Waltz | ecliptik@gmail.com |
| Mikalai Kastsevich | kostevich-kolya@mail.ru |
| Mike Kostersitz | mikek@microsoft.com |
| Mike Palmer | mike@mikepalmer.net |
| Mike Scherbakov | mihgen@gmail.com |
| Mike Spreitzer | mspreitz@us.ibm.com |
| Mike Stephen | mike.stephen@tigera.io |
| Mike Stowe | mikestowe@Mikes-MBP.sfo.tigera.io |
| mikev | mvierling@gmail.com |
| Miouge1 | Miouge1@users.noreply.github.com |
| ml | 6209465+ml-@users.noreply.github.com |
| mlbarrow | michael@barrow.me |
| MofeLee | mofe@me.com |
| Mohamed | mohamed.elzarei@motius.de |
| Molnigt | jan.munkhammar@safespring.com |
| Monty Taylor | mordred@inaugust.com |
| Mridul Gain | mridulgain@gmail.com |
| Muhammad Saghir | msagheer92@gmail.com |
| Muhammet Arslan | muhammet.arsln@gmail.com |
| Murali Paluru | leodotcloud@gmail.com |
| Mészáros Mihály | misi@majd.eu |
| Nate Taylor | ntaylor1781@gmail.com |
| Nathan Fritz | fritzy@netflint.net |
| Nathan Skrzypczak | nathan.skrzypczak@gmail.com |
| Nathan Wouda | nwouda@users.noreply.github.com |
| Neil Jerram | nj@metaswitch.com |
| Nic Doye | nic@worldofnic.org |
| Nick Bartos | nick@pistoncloud.com |
| Nick Wood | nwood@microsoft.com |
| Nikkau | nikkau@nikkau.net |
| Nirman Narang | narang@us.ibm.com |
| njuptlzf | njuptlzf@163.com |
| Noah Treuhaft | noah.treuhaft@docker.com |
| nohajc | nohajc@gmail.com |
| nuczzz | 33566732+nuczzz@users.noreply.github.com |
| nuxeric | 48699932+nuxeric@users.noreply.github.com |
| Oded Lazar | odedlaz@gmail.com |
| oldtree2k | oldtree2k@users.noreply.github.com |
| Olivier Bourdon | obourdon@mirantis.com |
| Onong Tayeng | onong.tayeng@gmail.com |
| OpenDev Sysadmins | openstack-infra@lists.openstack.org |
| Otto Sulin | otto.sulin@gmail.com |
| Owen Tuz | owen@segfault.re |
| pasanw | pasanweerasinghe@gmail.com |
| Patrick Marques | pmarques@users.noreply.github.com |
| Patrik Lundin | patrik@sigterm.se |
| Paul Tiplady | symmetricone@gmail.com |
| Pavel Khusainov | pkhusainov@mz.com |
| Pedro Coutinho | pedro@tigera.io |
| Penkey Suresh | penkeysuresh@users.noreply.github.com |
| penkeysuresh | penkeysuresh@gmail.com |
| peter | peterkellyonline@gmail.com |
| Peter Kelly | 659713+petercork@users.noreply.github.com |
| Peter Nordquist | peter.nordquist@pnnl.gov |
| Peter Salanki | peter@salanki.st |
| Peter White | peter.white@metaswitch.com |
| Phil Kates | me@philkates.com |
| Philip Southam | philip.southam@jpl.nasa.gov |
| Phu Kieu | pkieu@jpl.nasa.gov |
| Pierre Grimaud | grimaud.pierre@gmail.com |
| Pike.SZ.fish | pikeszfish@gmail.com |
| Prayag Verma | prayag.verma@gmail.com |
| Pushkar Joglekar | pjoglekar@vmware.com |
| PythonSyntax1 | 51872355+PythonSyntax1@users.noreply.github.com |
| Qiu Yu | qiuyu@ebaysf.com |
| Rafael | rafael@tigera.io |
| Rafal Borczuch | rafalq.b+github@gmail.com |
| Rafe Colton | r.colton@modcloth.com |
| Rahul Krishna Upadhyaya | rakrup@gmail.com |
| rao yunkun | yunkunrao@gmail.com |
| Renan Gonçalves | renan.saddam@gmail.com |
| Rene Dekker | rene@tigera.io |
| Rene Kaufmann | kaufmann.r@gmail.com |
| Reza R | 54559947+frozenprocess@users.noreply.github.com |
| Ricardo Katz | rikatz@users.noreply.github.com |
| Ricardo Pchevuzinske Katz | ricardo.katz@serpro.gov.br |
| Richard Kovacs | kovacsricsi@gmail.com |
| Richard Laughlin | richardwlaughlin@gmail.com |
| Richard Marshall | richard.marshall@ask.com |
| Ripta Pasay | ripta@users.noreply.github.com |
| Rob Brockbank | robbrockbank@gmail.com |
| Rob Terhaar | robbyt@robbyt.net |
| Robert Brockbank | rob.brockbank@metswitch.com |
| Robert Coleman | github@robert.net.nz |
| Roberto Alcantara | roberto@eletronica.org |
| Robin Müller | robin.mueller@outlook.de |
| Rodrigo Barbieri | rodrigo.barbieri2010@gmail.com |
| Roman Danko | elcomtik@users.noreply.github.com |
| Roman Sokolkov | roman@giantswarm.io |
| Ronnie P. Thomas | rpthms@users.noreply.github.com |
| Roshani Rathi | rrroshani227@gmail.com |
| roshanirathi | 42164609+roshanirathi@users.noreply.github.com |
| Rui Chen | rchen@meetup.com |
| rushtehrani | r@inven.io |
| Rustam Zagirov | stammru@gmail.com |
| Ryan Zhang | ryan.zhang@docker.com |
| rymarchikbot | 43807162+rymarchikbot@users.noreply.github.com |
| Saeid Askari | askari.saeed@gmail.com |
| Satish Matti | smatti@google.com |
| Satoru Takeuchi | sat@cybozu.co.jp |
| Saurabh Mohan | saurabh@tigera.io |
| Sean Kilgore | logikal@users.noreply.github.com |
| Sedef | ssavas@vmware.com |
| Semaphore Automatic Update | tom@tigera.io |
| Sergey Kulanov | skulanov@mirantis.com |
| Sergey Melnik | sergey.melnik@commercetools.de |
| Seth | sethpmccombs@gmail.com |
| Seth Malaki | seth@tigera.io |
| Shatrugna Sadhu | shatrugna.sadhu@gmail.com |
| Shaun Crampton | smc@metaswitch.com |
| shouheng.lei | shouheng.lei@easystack.cn |
| Simão Reis | smnrsti@gmail.com |
| SONG JIANG | song@tigera.io |
| SongmingYan | yan.songming@zte.com.cn |
| spdfnet | 32593931+spdfnet@users.noreply.github.com |
| Spike Curtis | spike@tigera.io |
| squ94wk | squ94wk@googlemail.com |
| sridhar | sridhar@tigera.io |
| sridhartigera | 63839878+sridhartigera@users.noreply.github.com |
| Sriram Yagnaraman | sriram.yagnaraman@est.tech |
| Stanislav Yotov | 29090864+svyotov@users.noreply.github.com |
| Stanislav-Galchynski | Stanislav.Galchynski@itechart-group.com |
| Stefan Breunig | stefan.breunig@xing.com |
| Stefan Bueringer | sbueringer@gmail.com |
| Stephen Schlie | schlie@tigera.io |
| Steve Gao | steve@tigera.io |
| Stéphane Cottin | stephane.cottin@vixns.com |
| Suraiya Hameed | 22776421+Suraiya-Hameed@users.noreply.github.com |
| Suraj Narwade | surajnarwade353@gmail.com |
| svInfra17 | vinayak@infracloud.io |
| Szymon Pyżalski | spyzalski@mirantis.com |
| TAKAHASHI Shuuji | shuuji3@gmail.com |
| Tamal Saha | tamal@appscode.com |
| Tathagata Chowdhury | calsoft.tathagata.chowdhury@tigera.io |
| tathagatachowdhury | tathagata.chowdhury@calsoftinc.com |
| Teller-Ulam | 2749404+Teller-Ulam@users.noreply.github.com |
| Thijs Scheepers | tscheepers@users.noreply.github.com |
| Thilo Fromm | thilo@kinvolk.io |
| Thomas Lohner | tl@scale.sc |
| Tim Bart | tim@pims.me |
| Tim Briggs | timothydbriggs@gmail.com |
| Timo Beckers | timo.beckers@klarrio.com |
| Todd Nine | tnine@apigee.com |
| Tom Denham | tom@tomdee.co.uk |
| Tom Pointon | tom@teepeestudios.net |
| Tomas Hruby | tomas@tigera.io |
| Tomas Mazak | tomas@valec.net |
| Tommaso Pozzetti | tommypozzetti@hotmail.it |
| tonic | tonicbupt@gmail.com |
| ToroNZ | tomasmaggio@gmail.com |
| Trapier Marshall | trapier.marshall@docker.com |
| Trevor Tao | trevor.tao@arm.com |
| Trond Hasle Amundsen | t.h.amundsen@usit.uio.no |
| turekt | 32360115+turekt@users.noreply.github.com |
| tuti | tuti@tigera.io |
| Tyler Stachecki | tstachecki@bloomberg.net |
| Uwe Dauernheim | uwe@dauernheim.net |
| Uwe Krueger | uwe.krueger@sap.com |
| vagrant | vagrant@mesos.vm |
| Valentin Ouvrard | valentin.ouvrard@nautile.sarl |
| Viacheslav Vasilyev | avoidik@gmail.com |
| Vieri | 15050873171@163.com |
| Vincent Schwarzer | vincent.schwarzer@yahoo.de |
| Vivek Thrivikraman | vivek.thrivikraman@est.tech |
| wangwengang | wangwengang@inspur.com |
| Wei Kin Huang | weikin.huang04@gmail.com |
| Wei.ZHAO | zhaowei@qiyi.com |
| weizhouBlue | 45163302+weizhouBlue@users.noreply.github.com |
| Wietse Muizelaar | wmuizelaar@bol.com |
| Will Rouesnel | w.rouesnel@gmail.com |
| Wouter Schoot | wouter@schoot.org |
| wuranbo | wuranbo@gmail.com |
| wwgfhf | 51694849+wwgfhf@users.noreply.github.com |
| Xiang Dai | 764524258@qq.com |
| Xiang Liu | lx1036@126.com |
| xieyanker | xjsisnice@gmail.com |
| Xin He | he_xinworld@126.com |
| YAMAMOTO Takashi | yamamoto@midokura.com |
| Yan Zhu | yanzhu@alauda.io |
| yang59324 | yang59324@163.com |
| yanyan8566 | 62531742+yanyan8566@users.noreply.github.com |
| yassan | yassan0627@gmail.com |
| Yecheng Fu | cofyc.jackson@gmail.com |
| Yi He | yi.he@arm.com |
| Yi Tao | yitao@qiniu.com |
| ymyang | yangym9@lenovo.com |
| Yongkun Gui | ygui@google.com |
| Yuji Azama | yuji.azama@gmail.com |
| zealic | zealic@gmail.com |
| zhangjie | zhangjie0619@yeah.net |
| zhouxinyong | zhouxinyong@inspur.com |
| Zopanix | zopanix@gmail.com |
| Zuul | zuul@review.openstack.org |

176
vendor/github.com/projectcalico/calico/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,296 @@
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import (
"reflect"
"sync"
"time"
"github.com/patrickmn/go-cache"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/util/workqueue"
)
// ResourceCache stores resources and queues updates when those resources
// are created, modified, or deleted. It de-duplicates updates by ensuring
// updates are only queued when an object has changed.
type ResourceCache interface {
	// Set sets the key to the provided value, and generates an update
	// on the queue if the value has changed.
	Set(key string, value interface{})

	// Get gets the value associated with the given key. Returns nil
	// if the key is not present.
	Get(key string) (interface{}, bool)

	// Prime sets the key to the provided value, but never generates
	// an update on the queue.
	Prime(key string, value interface{})

	// Delete deletes the value identified by the given key from the cache, and
	// generates an update on the queue if a value was deleted.
	Delete(key string)

	// Clean removes the object identified by the given key from the cache.
	// It does not generate an update on the queue.
	Clean(key string)

	// ListKeys lists the keys currently in the cache.
	ListKeys() []string

	// Run enables the generation of events on the output queue and starts
	// cache reconciliation.
	Run(reconcilerPeriod string)

	// GetQueue returns the cache's output queue, which emits a stream
	// of any keys which have been created, modified, or deleted.
	GetQueue() workqueue.RateLimitingInterface
}
// ResourceCacheArgs is the struct passed to the constructor of ResourceCache.
// It groups together all the constructor arguments in a single struct.
type ResourceCacheArgs struct {
	// ListFunc returns a mapping of keys to objects from the Calico datastore.
	ListFunc func() (map[string]interface{}, error)

	// ObjectType is the type of object which is to be stored in this cache.
	ObjectType reflect.Type

	// LogTypeDesc (optional) is used to log the type of object stored in the cache.
	// If not provided it is derived from the ObjectType.
	LogTypeDesc string

	// ReconcilerConfig controls which kinds of datastore/cache drift the
	// periodic reconciler reacts to.
	ReconcilerConfig ReconcilerConfig
}
// ReconcilerConfig contains configuration for the periodic reconciler.
// Each flag disables one class of drift correction; the zero value leaves
// all corrections enabled.
type ReconcilerConfig struct {
	// DisableUpdateOnChange disables the queuing of updates when the reconciler
	// detects that a value has changed in the datastore.
	DisableUpdateOnChange bool

	// DisableMissingInDatastore disables queueing of updates when the reconciler
	// detects that a value is no longer in the datastore but still exists in the cache.
	DisableMissingInDatastore bool

	// DisableMissingInCache disables queueing of updates when the reconciler detects
	// that a value that is still in the datastore no longer is in the cache.
	DisableMissingInCache bool
}
// calicoCache implements the ResourceCache interface.
type calicoCache struct {
	threadSafeCache  *cache.Cache                           // backing key/value store
	workqueue        workqueue.RateLimitingInterface        // output queue of changed keys
	ListFunc         func() (map[string]interface{}, error) // lists datastore state for reconciliation
	ObjectType       reflect.Type                           // expected concrete type of cached values
	log              *log.Entry                             // logger annotated with the cached type
	running          bool                                   // true once Run() has been called; guarded by mut
	mut              *sync.Mutex                            // protects running
	reconcilerConfig ReconcilerConfig                       // reconciler behavior switches
}
// NewResourceCache builds and returns a resource cache using the provided arguments.
func NewResourceCache(args ResourceCacheArgs) ResourceCache {
	// Build a context-aware logger up front: prefer the human-readable
	// description when one was supplied, otherwise fall back to the type.
	var entry *log.Entry
	if args.LogTypeDesc == "" {
		entry = log.WithFields(log.Fields{"type": args.ObjectType})
	} else {
		entry = log.WithFields(log.Fields{"type": args.LogTypeDesc})
	}

	return &calicoCache{
		threadSafeCache:  cache.New(cache.NoExpiration, cache.DefaultExpiration),
		workqueue:        workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		ListFunc:         args.ListFunc,
		ObjectType:       args.ObjectType,
		log:              entry,
		mut:              &sync.Mutex{},
		reconcilerConfig: args.ReconcilerConfig,
	}
}
// Set stores newObj under key and, once the cache is running, queues an
// update whenever this changes the stored value. It is fatal to store a
// value whose type does not match the cache's configured ObjectType.
func (c *calicoCache) Set(key string, newObj interface{}) {
	if t := reflect.TypeOf(newObj); t != c.ObjectType {
		c.log.Fatalf("Wrong object type received to store in cache. Expected: %s, Found: %s", c.ObjectType, t)
	}

	existingObj, found := c.threadSafeCache.Get(key)
	if !found {
		// First time we see this key: store it and signal the change.
		c.threadSafeCache.Set(key, newObj, cache.NoExpiration)
		if c.isRunning() {
			c.log.Debugf("%#v not found in cache, adding it + queuing update.", newObj)
			c.workqueue.Add(key)
		}
		return
	}

	c.log.Debugf("%#v already exists in cache - comparing.", existingObj)
	if reflect.DeepEqual(existingObj, newObj) {
		// Unchanged - avoid a spurious update on the queue.
		return
	}

	// The objects do not match - overwrite and send an update over the queue.
	c.threadSafeCache.Set(key, newObj, cache.NoExpiration)
	if c.isRunning() {
		c.log.Debugf("Queueing update - %#v and %#v do not match.", newObj, existingObj)
		c.workqueue.Add(key)
	}
}
// Delete removes the value identified by key from the cache and always
// queues an update so consumers can react to the deletion.
// NOTE(review): unlike Set, this queues even before Run() has been called.
func (c *calicoCache) Delete(key string) {
	c.log.Debugf("Deleting %s from cache", key)
	c.threadSafeCache.Delete(key)
	c.workqueue.Add(key)
}
// Clean removes the object identified by key from the cache without
// generating an update on the queue.
func (c *calicoCache) Clean(key string) {
	c.log.Debugf("Cleaning %s from cache, no update required", key)
	c.threadSafeCache.Delete(key)
}
// Get returns the value stored under key and whether it was present.
func (c *calicoCache) Get(key string) (interface{}, bool) {
	if obj, found := c.threadSafeCache.Get(key); found {
		return obj, true
	}
	return nil, false
}
// Prime adds the key and value to the cache but will never generate
// an update on the queue, regardless of whether the cache is running.
func (c *calicoCache) Prime(key string, value interface{}) {
	c.threadSafeCache.Set(key, value, cache.NoExpiration)
}
// ListKeys returns a list of all the keys currently in the cache.
func (c *calicoCache) ListKeys() []string {
	items := c.threadSafeCache.Items()
	keys := make([]string, 0, len(items))
	for key := range items {
		keys = append(keys, key)
	}
	return keys
}
// GetQueue returns the output queue from the cache. Whenever a key/value pair
// is modified, an event (the key) will appear on this queue.
func (c *calicoCache) GetQueue() workqueue.RateLimitingInterface {
	return c.workqueue
}
// Run starts the cache's background reconciliation loop and enables update
// generation. Any Set() calls prior to calling Run() will prime the cache,
// but not trigger any updates on the output queue.
func (c *calicoCache) Run(reconcilerPeriod string) {
	// Kick off the periodic datastore reconciliation in the background.
	go c.reconcile(reconcilerPeriod)

	// Indicate that the cache is running, and so updates
	// can be queued.
	c.mut.Lock()
	c.running = true
	c.mut.Unlock()
}
// isRunning reports whether Run() has been called on this cache.
func (c *calicoCache) isRunning() bool {
	c.mut.Lock()
	running := c.running
	c.mut.Unlock()
	return running
}
// reconcile runs a datastore reconciliation every `reconcilerPeriod` in order
// to bring the datastore in sync with the cache. This corrects any manual
// changes made in the datastore without the cache being aware.
//
// A period of 0 disables reconciliation entirely; an unparseable period is
// fatal. This function loops forever and is intended to run in a goroutine.
func (c *calicoCache) reconcile(reconcilerPeriod string) {
	duration, err := time.ParseDuration(reconcilerPeriod)
	if err != nil {
		c.log.Fatalf("Invalid time duration format for reconciler: %s. Some valid examples: 5m, 30s, 2m30s etc.", reconcilerPeriod)
	}

	// If user has set duration to 0 then disable the reconciler job.
	if duration.Nanoseconds() == 0 {
		c.log.Infof("Reconciler period set to %d. Disabling reconciler.", duration.Nanoseconds())
		return
	}

	// Loop forever, performing a datastore reconciliation periodically.
	for {
		c.log.Debugf("Performing reconciliation")
		if err := c.performDatastoreSync(); err != nil {
			c.log.WithError(err).Error("Reconciliation failed")
			// BUGFIX: previously this retried immediately (continue before
			// the sleep), hot-looping against a persistently failing
			// datastore. Wait out the period before retrying instead.
		} else {
			// Reconciliation was successful, sleep the configured duration.
			c.log.Debugf("Reconciliation complete, %+v until next one.", duration)
		}
		time.Sleep(duration)
	}
}
// performDatastoreSync compares the contents of the datastore against the
// cache and queues an update for every key that is missing on either side or
// whose value differs, subject to the reconciler configuration switches.
func (c *calicoCache) performDatastoreSync() error {
	// Fetch the authoritative state from the datastore using ListFunc.
	datastoreObjs, err := c.ListFunc()
	if err != nil {
		c.log.WithError(err).Errorf("unable to list objects from datastore while reconciling.")
		return err
	}

	// Build the union of all keys known to the datastore and the cache.
	allKeys := map[string]bool{}
	for key := range datastoreObjs {
		allKeys[key] = true
	}
	for _, key := range c.ListKeys() {
		allKeys[key] = true
	}

	c.log.Debugf("Reconciling %d keys in total", len(allKeys))
	for key := range allKeys {
		cachedObj, existsInCache := c.Get(key)
		datastoreObj, existsInDatastore := datastoreObjs[key]

		switch {
		case !existsInCache:
			// In the datastore but not the cache: the value should not
			// exist, so queue an update to remove it (if enabled).
			if !c.reconcilerConfig.DisableMissingInCache {
				c.log.WithField("key", key).Warn("Value for key should not exist, queueing update to remove")
				c.workqueue.Add(key)
			}
		case !existsInDatastore:
			// In the cache but not the datastore: queue an update to
			// re-add it (if enabled).
			if !c.reconcilerConfig.DisableMissingInDatastore {
				c.log.WithField("key", key).Warn("Value for key is missing in datastore, queueing update to reprogram")
				c.workqueue.Add(key)
			}
		case !reflect.DeepEqual(datastoreObj, cachedObj):
			// Present on both sides but different: queue an update to
			// reprogram the correct value (if enabled).
			if !c.reconcilerConfig.DisableUpdateOnChange {
				c.log.WithField("key", key).Warn("Value for key has changed, queueing update to reprogram")
				c.log.Debugf("Cached: %#v", cachedObj)
				c.log.Debugf("Updated: %#v", datastoreObj)
				c.workqueue.Add(key)
			}
		}
	}
	return nil
}

View File

@@ -0,0 +1,28 @@
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package converter
// Converter is responsible for converting a given Kubernetes object to its
// equivalent Calico object.
type Converter interface {
	// Convert converts a Kubernetes object to the Calico representation of it.
	Convert(k8sObj interface{}) (interface{}, error)

	// GetKey returns the appropriate cache key for the (converted) object.
	GetKey(obj interface{}) string

	// DeleteArgsFromKey returns the namespace and name of the object to pass
	// to Delete for the given key as generated by GetKey.
	DeleteArgsFromKey(key string) (string, string)
}

View File

@@ -0,0 +1,73 @@
// Copyright (c) 2017-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package converter
import (
"fmt"
api "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
)
// namespaceConverter converts Kubernetes Namespaces into Calico Profiles.
type namespaceConverter struct {
}

// NewNamespaceConverter is the constructor for namespaceConverter.
func NewNamespaceConverter() Converter {
	return &namespaceConverter{}
}
// Convert turns a Kubernetes Namespace (or a tombstone wrapping one) into the
// Calico Profile that represents it.
func (nc *namespaceConverter) Convert(k8sObj interface{}) (interface{}, error) {
	c := conversion.NewConverter()

	// Deletions may deliver a tombstone rather than the object itself, so
	// unwrap it when necessary.
	namespace, ok := k8sObj.(*v1.Namespace)
	if !ok {
		tombstone, isTombstone := k8sObj.(cache.DeletedFinalStateUnknown)
		if !isTombstone {
			return nil, fmt.Errorf("couldn't get object from tombstone %+v", k8sObj)
		}
		if namespace, ok = tombstone.Obj.(*v1.Namespace); !ok {
			return nil, fmt.Errorf("tombstone contained object that is not a Namespace %+v", k8sObj)
		}
	}

	kvp, err := c.NamespaceToProfile(namespace)
	if err != nil {
		return nil, err
	}
	profile := kvp.Value.(*api.Profile)

	// Keep only the metadata we sync (the name). ResourceVersion,
	// CreationTimestamp, etc. are not relevant and would otherwise cause
	// unnecessary updates.
	profile.ObjectMeta = metav1.ObjectMeta{Name: profile.Name}
	return *profile, nil
}
// GetKey returns the name of the Profile as its key. For Profiles
// backed by Kubernetes namespaces and managed by this controller, the name
// is of the format `kns.name`.
func (nc *namespaceConverter) GetKey(obj interface{}) string {
	profile := obj.(api.Profile)
	return profile.Name
}
// DeleteArgsFromKey returns the namespace and name to pass to Delete for the
// given key. Profiles are not namespaced, so the namespace is always empty
// and the key itself is the profile name.
//
// Consistency fix: the receiver is renamed from `p` to `nc` to match the
// other methods of namespaceConverter.
func (nc *namespaceConverter) DeleteArgsFromKey(key string) (string, string) {
	// Not namespaced, so just return the key, which is the profile name.
	return "", key
}

View File

@@ -0,0 +1,82 @@
// Copyright (c) 2017-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package converter
import (
"errors"
"fmt"
"strings"
api "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion"
cerrors "github.com/projectcalico/calico/libcalico-go/lib/errors"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
)
// policyConverter converts Kubernetes NetworkPolicies into Calico NetworkPolicies.
type policyConverter struct {
}

// NewPolicyConverter is the constructor for policyConverter.
func NewPolicyConverter() Converter {
	return &policyConverter{}
}
// Convert takes a Kubernetes NetworkPolicy and returns a Calico api.NetworkPolicy representation.
func (p *policyConverter) Convert(k8sObj interface{}) (interface{}, error) {
	// Deletions may deliver a tombstone rather than the object itself.
	np, ok := k8sObj.(*networkingv1.NetworkPolicy)
	if !ok {
		tombstone, ok := k8sObj.(cache.DeletedFinalStateUnknown)
		if !ok {
			return nil, fmt.Errorf("couldn't get object from tombstone %+v", k8sObj)
		}
		np, ok = tombstone.Obj.(*networkingv1.NetworkPolicy)
		if !ok {
			return nil, fmt.Errorf("tombstone contained object that is not a NetworkPolicy %+v", k8sObj)
		}
	}

	c := conversion.NewConverter()
	kvp, err := c.K8sNetworkPolicyToCalico(np)

	// Silently ignore rule conversion errors. We don't expect any conversion errors
	// since the data given to us here is validated by the Kubernetes API. The conversion
	// code ignores any rules that it cannot parse, and we will pass the valid ones to Felix.
	var e *cerrors.ErrorPolicyConversion
	if err != nil && !errors.As(err, &e) {
		// Any other kind of error aborts the conversion.
		return nil, err
	}

	cnp := kvp.Value.(*api.NetworkPolicy)

	// Isolate the metadata fields that we care about. ResourceVersion, CreationTimeStamp, etc are
	// not relevant so we ignore them. This prevents unnecessary updates.
	cnp.ObjectMeta = metav1.ObjectMeta{Name: cnp.Name, Namespace: cnp.Namespace}

	// NOTE(review): on a rule-conversion error this deliberately returns the
	// (partial) policy together with the non-nil error, rather than nil.
	return *cnp, err
}
// GetKey returns the 'namespace/name' of the given Calico NetworkPolicy as its key.
func (p *policyConverter) GetKey(obj interface{}) string {
	policy := obj.(api.NetworkPolicy)
	return policy.Namespace + "/" + policy.Name
}
// DeleteArgsFromKey splits a 'namespace/name' key (as produced by GetKey)
// into the namespace and name to pass to Delete.
//
// Robustness fix: the previous implementation indexed SplitN's result blindly
// and panicked (index out of range) on a key without a '/' separator; a
// malformed key now degrades to an empty namespace instead.
func (p *policyConverter) DeleteArgsFromKey(key string) (string, string) {
	namespace, name, found := strings.Cut(key, "/")
	if !found {
		return "", key
	}
	return namespace, name
}

View File

@@ -0,0 +1,146 @@
// Copyright (c) 2017-2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package converter
import (
"errors"
"fmt"
"github.com/projectcalico/calico/libcalico-go/lib/backend/model"
log "github.com/sirupsen/logrus"
api "github.com/projectcalico/calico/libcalico-go/lib/apis/v3"
"github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache"
)
// WorkloadEndpointData is an internal struct used to store the various bits
// of information that the policy controller cares about on a workload endpoint.
type WorkloadEndpointData struct {
	PodName        string            // name of the backing Kubernetes Pod
	Namespace      string            // namespace of the endpoint
	Labels         map[string]string // labels carried by the endpoint
	ServiceAccount string            // service account name from the endpoint spec
}
// PodConverter mirrors the Converter interface for Pods, which convert to a
// slice of WorkloadEndpointData rather than a single object.
type PodConverter interface {
	// Convert converts a Kubernetes Pod (or tombstone) into WorkloadEndpointData.
	Convert(k8sObj interface{}) ([]WorkloadEndpointData, error)
	// GetKey returns the cache key for the given WorkloadEndpointData.
	GetKey(obj WorkloadEndpointData) string
	// DeleteArgsFromKey returns the namespace and name for the given key.
	DeleteArgsFromKey(key string) (string, string)
}

// podConverter implements PodConverter.
type podConverter struct{}
// BuildWorkloadEndpointData generates the correct WorkloadEndpointData for the given
// list of WorkloadEndpoints, extracting the fields that the policy controller is
// responsible for syncing.
func BuildWorkloadEndpointData(weps ...api.WorkloadEndpoint) []WorkloadEndpointData {
	var out []WorkloadEndpointData
	for i := range weps {
		wep := &weps[i]
		out = append(out, WorkloadEndpointData{
			PodName:        wep.Spec.Pod,
			Namespace:      wep.Namespace,
			Labels:         wep.Labels,
			ServiceAccount: wep.Spec.ServiceAccountName,
		})
	}
	return out
}
// MergeWorkloadEndpointData applies the given WorkloadEndpointData to the provided
// WorkloadEndpoint, updating the synced fields (labels and service account) with
// the new values. It is fatal to call this with data for a different endpoint.
func MergeWorkloadEndpointData(wep *api.WorkloadEndpoint, upd WorkloadEndpointData) {
	// Guard against programming errors: the update must target this endpoint.
	if wep.Spec.Pod != upd.PodName || wep.Namespace != upd.Namespace {
		log.Fatalf("Bad attempt to merge data for %s/%s into wep %s/%s", upd.PodName, upd.Namespace, wep.Name, wep.Namespace)
	}
	wep.Labels = upd.Labels
	wep.Spec.ServiceAccountName = upd.ServiceAccount
}
// NewPodConverter is the constructor for podConverter.
func NewPodConverter() PodConverter {
	return &podConverter{}
}
// Convert converts a Kubernetes Pod (or tombstone) into the WorkloadEndpointData
// for each workload endpoint backed by the Pod.
func (p *podConverter) Convert(k8sObj interface{}) ([]WorkloadEndpointData, error) {
	c := conversion.NewConverter()

	pod, err := ExtractPodFromUpdate(k8sObj)
	if err != nil {
		return nil, err
	}

	// The conversion logic always requires a node, but we don't always have one.
	// We don't actually care about the value used for the node in this controller,
	// so dummy it out if it doesn't exist.
	if pod.Spec.NodeName == "" {
		pod.Spec.NodeName = "unknown.node"
	}

	// Convert the Pod into workload endpoints.
	kvps, err := c.PodToWorkloadEndpoints(pod)
	if err != nil {
		return nil, err
	}

	// Build and return WorkloadEndpointData structs from the endpoints.
	return BuildWorkloadEndpointData(kvpsToWEPs(kvps)...), nil
}
// kvpsToWEPs unwraps a list of KVPairs into the WorkloadEndpoint values they
// carry, skipping any nil values.
func kvpsToWEPs(kvps []*model.KVPair) []api.WorkloadEndpoint {
	var weps []api.WorkloadEndpoint
	for _, kvp := range kvps {
		if wep := kvp.Value.(*api.WorkloadEndpoint); wep != nil {
			weps = append(weps, *wep)
		}
	}
	return weps
}
// GetKey takes a WorkloadEndpointData and returns the key which
// identifies it - namespace/name.
func (p *podConverter) GetKey(obj WorkloadEndpointData) string {
	return obj.Namespace + "/" + obj.PodName
}
// DeleteArgsFromKey exists only to satisfy the PodConverter interface; it
// always panics because the key alone does not carry enough information to
// generate delete arguments for Pods / WorkloadEndpoints.
func (p *podConverter) DeleteArgsFromKey(key string) (string, string) {
	// We don't have enough information to generate the delete args from the key that's used
	// for Pods / WorkloadEndpoints, so just panic. This should never be called but is necessary
	// to satisfy the interface.
	log.Panicf("DeleteArgsFromKey call for WorkloadEndpoints is not allowed")
	// Unreachable - present only to satisfy the compiler.
	return "", ""
}
// ExtractPodFromUpdate takes an update as received from the informer and returns
// the Pod object, if present. Some updates (particularly deletes) can include
// tombstone placeholders rather than an exact Pod object; this function safely
// handles those cases.
func ExtractPodFromUpdate(obj interface{}) (*v1.Pod, error) {
	if pod, ok := obj.(*v1.Pod); ok {
		return pod, nil
	}

	// Not a Pod directly - it may be a tombstone wrapping one.
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		return nil, errors.New("couldn't get object from tombstone")
	}
	pod, ok := tombstone.Obj.(*v1.Pod)
	if !ok {
		return nil, errors.New("tombstone contained object that is not a Pod")
	}
	return pod, nil
}

View File

@@ -0,0 +1,74 @@
// Copyright (c) 2018-2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package converter
import (
"fmt"
api "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
)
// serviceAccountConverter converts Kubernetes ServiceAccounts into Calico Profiles.
type serviceAccountConverter struct {
}

// NewServiceAccountConverter is the constructor for serviceAccountConverter,
// which converts a ServiceAccount to a Profile.
func NewServiceAccountConverter() Converter {
	return &serviceAccountConverter{}
}
// Convert turns a Kubernetes ServiceAccount (or a deletion tombstone wrapping
// one) into the corresponding Calico Profile value. Metadata other than the
// name is stripped so that irrelevant fields do not trigger spurious updates.
func (nc *serviceAccountConverter) Convert(k8sObj interface{}) (interface{}, error) {
	serviceAccount, ok := k8sObj.(*v1.ServiceAccount)
	if !ok {
		// Deletion events may deliver a tombstone wrapper instead of the object itself.
		tombstone, isTombstone := k8sObj.(cache.DeletedFinalStateUnknown)
		if !isTombstone {
			return nil, fmt.Errorf("couldn't get object from tombstone %+v", k8sObj)
		}
		if serviceAccount, ok = tombstone.Obj.(*v1.ServiceAccount); !ok {
			return nil, fmt.Errorf("tombstone contained object that is not a Serviceaccount %+v", k8sObj)
		}
	}

	kvp, err := conversion.NewConverter().ServiceAccountToProfile(serviceAccount)
	if err != nil {
		return nil, err
	}

	profile := kvp.Value.(*api.Profile)
	// Keep only the name; ResourceVersion, CreationTimestamp, etc. are not
	// relevant here, and dropping them prevents unnecessary updates.
	profile.ObjectMeta = metav1.ObjectMeta{Name: profile.Name}
	return *profile, nil
}
// GetKey returns the name of the Profile, which doubles as its cache key. For
// Profiles backed by Kubernetes service accounts and managed by this
// controller, the name has the form `ksa.<namespace>.<name>`.
func (nc *serviceAccountConverter) GetKey(obj interface{}) string {
	return obj.(api.Profile).Name
}
// DeleteArgsFromKey returns the (namespace, name) arguments needed to delete
// the resource identified by the given cache key. Profiles are not namespaced,
// so the namespace is empty and the key itself is the profile name.
func (nc *serviceAccountConverter) DeleteArgsFromKey(key string) (string, string) {
	// No namespace; the key is the profile name.
	return "", key
}

View File

@@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

View File

@@ -0,0 +1,81 @@
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
)
// Kind names used in the TypeMeta of BlockAffinity resources.
const (
	KindBlockAffinity     = "BlockAffinity"
	KindBlockAffinityList = "BlockAffinityList"
)

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// BlockAffinity maintains a block affinity's state
type BlockAffinity struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Specification of the BlockAffinity.
	Spec BlockAffinitySpec `json:"spec,omitempty"`
}

// BlockAffinitySpec contains the specification for a BlockAffinity resource.
type BlockAffinitySpec struct {
	// State of the affinity. Valid values are defined by the IPAM code
	// elsewhere in the project — confirm against the allocator before relying
	// on specific strings here.
	State string `json:"state"`
	// Node is the name of the node this block is affine to.
	Node string `json:"node"`
	// CIDR of the block.
	CIDR string `json:"cidr"`
	// Deleted indicates that this block affinity is being deleted.
	// This field is a string for compatibility with older releases that
	// mistakenly treat this field as a string.
	// NOTE(review): the upstream comment above is circular ("a string ... as a
	// string") — presumably older releases treated it as a boolean; confirm
	// against upstream history before editing.
	Deleted string `json:"deleted"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// BlockAffinityList contains a list of BlockAffinity resources.
type BlockAffinityList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items           []BlockAffinity `json:"items"`
}
// NewBlockAffinity returns a zero-valued BlockAffinity whose TypeMeta is
// pre-populated for the current API group/version.
func NewBlockAffinity() *BlockAffinity {
	ba := &BlockAffinity{}
	ba.TypeMeta = metav1.TypeMeta{
		Kind:       KindBlockAffinity,
		APIVersion: apiv3.GroupVersionCurrent,
	}
	return ba
}

// NewBlockAffinityList returns a zero-valued BlockAffinityList whose TypeMeta
// is pre-populated for the current API group/version.
func NewBlockAffinityList() *BlockAffinityList {
	l := &BlockAffinityList{}
	l.TypeMeta = metav1.TypeMeta{
		Kind:       KindBlockAffinityList,
		APIVersion: apiv3.GroupVersionCurrent,
	}
	return l
}

View File

@@ -0,0 +1,26 @@
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package v3 implements the resource definitions used on the Calico client API.
The resource structures include the JSON tags for each exposed field. These are standard
golang tags that define the JSON format of the structures as used by calicoctl. The YAML
format also used by calicoctl is directly mapped from the JSON.
*/
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true
package v3

View File

@@ -0,0 +1,121 @@
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
)
// Kind names used in the TypeMeta of IPAMBlock resources.
const (
	KindIPAMBlock     = "IPAMBlock"
	KindIPAMBlockList = "IPAMBlockList"
)

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// IPAMBlock contains information about a block for IP address assignment.
type IPAMBlock struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Specification of the IPAMBlock.
	Spec IPAMBlockSpec `json:"spec,omitempty"`
}

// IPAMBlockSpec contains the specification for an IPAMBlock resource.
type IPAMBlockSpec struct {
	// The block's CIDR.
	CIDR string `json:"cidr"`
	// Affinity of the block, if this block has one. If set, it will be of the form
	// "host:<hostname>". If not set, this block is not affine to a host.
	Affinity *string `json:"affinity,omitempty"`
	// Array of allocations in-use within this block. nil entries mean the allocation is free.
	// For non-nil entries at index i, the index is the ordinal of the allocation within this block
	// and the value is the index of the associated attributes in the Attributes array.
	Allocations []*int `json:"allocations"`
	// Unallocated is an ordered list of allocations which are free in the block.
	Unallocated []int `json:"unallocated"`
	// Attributes is an array of arbitrary metadata associated with allocations in the block. To find
	// attributes for a given allocation, use the value of the allocation's entry in the Allocations array
	// as the index of the element in this array.
	Attributes []AllocationAttribute `json:"attributes"`
	// We store a sequence number that is updated each time the block is written.
	// Each allocation will also store the sequence number of the block at the time of its creation.
	// When releasing an IP, passing the sequence number associated with the allocation allows us
	// to protect against a race condition and ensure the IP hasn't been released and re-allocated
	// since the release request.
	//
	// +kubebuilder:default=0
	// +optional
	SequenceNumber uint64 `json:"sequenceNumber"`
	// Map of allocated ordinal within the block to sequence number of the block at
	// the time of allocation. Kubernetes does not allow numerical keys for maps, so
	// the key is cast to a string.
	// +optional
	SequenceNumberForAllocation map[string]uint64 `json:"sequenceNumberForAllocation"`
	// Deleted is an internal boolean used to workaround a limitation in the Kubernetes API whereby
	// deletion will not return a conflict error if the block has been updated. It should not be set manually.
	// +optional
	Deleted bool `json:"deleted"`
	// StrictAffinity on the IPAMBlock is deprecated and no longer used by the code. Use IPAMConfig StrictAffinity instead.
	DeprecatedStrictAffinity bool `json:"strictAffinity"`
}

// AllocationAttribute is arbitrary metadata associated with one or more
// allocations in a block; entries are referenced by index from
// IPAMBlockSpec.Allocations.
type AllocationAttribute struct {
	// AttrPrimary is the allocation's handle ID (serialized as "handle_id").
	AttrPrimary *string `json:"handle_id,omitempty"`
	// AttrSecondary carries additional free-form key/value metadata.
	AttrSecondary map[string]string `json:"secondary,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// IPAMBlockList contains a list of IPAMBlock resources.
type IPAMBlockList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items           []IPAMBlock `json:"items"`
}
// NewIPAMBlock returns a zero-valued IPAMBlock whose TypeMeta is pre-populated
// for the current API group/version.
func NewIPAMBlock() *IPAMBlock {
	b := &IPAMBlock{}
	b.TypeMeta = metav1.TypeMeta{
		Kind:       KindIPAMBlock,
		APIVersion: apiv3.GroupVersionCurrent,
	}
	return b
}

// NewIPAMBlockList returns a zero-valued IPAMBlockList whose TypeMeta is
// pre-populated for the current API group/version.
func NewIPAMBlockList() *IPAMBlockList {
	l := &IPAMBlockList{}
	l.TypeMeta = metav1.TypeMeta{
		Kind:       KindIPAMBlockList,
		APIVersion: apiv3.GroupVersionCurrent,
	}
	return l
}

View File

@@ -0,0 +1,85 @@
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
)
// Kind names used in the TypeMeta of IPAMConfig resources, plus the name of
// the single cluster-wide IPAMConfig instance ("default").
const (
	KindIPAMConfig       = "IPAMConfig"
	KindIPAMConfigList   = "IPAMConfigList"
	GlobalIPAMConfigName = "default"
)

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// IPAMConfig contains information about a block for IP address assignment.
type IPAMConfig struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Specification of the IPAMConfig.
	Spec IPAMConfigSpec `json:"spec,omitempty"`
}

// IPAMConfigSpec contains the specification for an IPAMConfig resource.
type IPAMConfigSpec struct {
	// StrictAffinity and AutoAllocateBlocks are IPAM behaviour toggles; their
	// semantics are enforced by the IPAM allocator elsewhere in the project —
	// confirm there before relying on a specific interpretation.
	StrictAffinity     bool `json:"strictAffinity"`
	AutoAllocateBlocks bool `json:"autoAllocateBlocks"`
	// MaxBlocksPerHost, if non-zero, is the max number of blocks that can be
	// affine to each host.
	// +kubebuilder:validation:Minimum:=0
	// +kubebuilder:validation:Maximum:=2147483647
	// +optional
	MaxBlocksPerHost int `json:"maxBlocksPerHost,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// IPAMConfigList contains a list of IPAMConfig resources.
type IPAMConfigList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items           []IPAMConfig `json:"items"`
}
// NewIPAMConfig returns a zero-valued IPAMConfig whose TypeMeta is
// pre-populated for the current API group/version.
func NewIPAMConfig() *IPAMConfig {
	c := &IPAMConfig{}
	c.TypeMeta = metav1.TypeMeta{
		Kind:       KindIPAMConfig,
		APIVersion: apiv3.GroupVersionCurrent,
	}
	return c
}

// NewIPAMConfigList returns a zero-valued IPAMConfigList whose TypeMeta is
// pre-populated for the current API group/version.
func NewIPAMConfigList() *IPAMConfigList {
	l := &IPAMConfigList{}
	l.TypeMeta = metav1.TypeMeta{
		Kind:       KindIPAMConfigList,
		APIVersion: apiv3.GroupVersionCurrent,
	}
	return l
}

View File

@@ -0,0 +1,78 @@
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
)
// Kind names used in the TypeMeta of IPAMHandle resources.
const (
	KindIPAMHandle     = "IPAMHandle"
	KindIPAMHandleList = "IPAMHandleList"
)

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// IPAMHandle contains information about an IPAMHandle resource.
type IPAMHandle struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Specification of the IPAMHandle.
	Spec IPAMHandleSpec `json:"spec,omitempty"`
}

// IPAMHandleSpec contains the specification for an IPAMHandle resource.
type IPAMHandleSpec struct {
	// HandleID identifies this handle.
	HandleID string `json:"handleID"`
	// Block maps a block identifier to an integer count — presumably the
	// number of addresses allocated under this handle in that block; confirm
	// against the IPAM allocator.
	Block map[string]int `json:"block"`
	// Deleted marks the handle as being deleted.
	// +optional
	Deleted bool `json:"deleted"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// IPAMHandleList contains a list of IPAMHandle resources.
type IPAMHandleList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items           []IPAMHandle `json:"items"`
}
// NewIPAMHandle returns a zero-valued IPAMHandle whose TypeMeta is
// pre-populated for the current API group/version.
func NewIPAMHandle() *IPAMHandle {
	h := &IPAMHandle{}
	h.TypeMeta = metav1.TypeMeta{
		Kind:       KindIPAMHandle,
		APIVersion: apiv3.GroupVersionCurrent,
	}
	return h
}

// NewIPAMHandleList returns a zero-valued IPAMHandleList whose TypeMeta is
// pre-populated for the current API group/version.
func NewIPAMHandleList() *IPAMHandleList {
	l := &IPAMHandleList{}
	l.TypeMeta = metav1.TypeMeta{
		Kind:       KindIPAMHandleList,
		APIVersion: apiv3.GroupVersionCurrent,
	}
	return l
}

View File

@@ -0,0 +1,160 @@
// Copyright (c) 2017,2020 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/api/pkg/lib/numorstring"
)
// Kind names used in the TypeMeta of Node resources, plus candidate values
// for NodeAddress.Type (validated by the "ipType" validator — confirm the
// full set against the project's validation code).
const (
	KindNode     = "Node"
	KindNodeList = "NodeList"
	CalicoNodeIP = "CalicoNodeIP"
	InternalIP   = "InternalIP"
	ExternalIP   = "ExternalIP"
)

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Node contains information about a Node resource.
type Node struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Specification of the Node.
	Spec NodeSpec `json:"spec,omitempty"`
	// Status of the Node.
	Status NodeStatus `json:"status,omitempty"`
}

// NodeSpec contains the specification for a Node resource.
type NodeSpec struct {
	// BGP configuration for this node.
	BGP *NodeBGPSpec `json:"bgp,omitempty" validate:"omitempty"`
	// IPv4VXLANTunnelAddr is the IPv4 address of the VXLAN tunnel.
	IPv4VXLANTunnelAddr string `json:"ipv4VXLANTunnelAddr,omitempty" validate:"omitempty,ipv4"`
	// VXLANTunnelMACAddr is the MAC address of the VXLAN tunnel.
	VXLANTunnelMACAddr string `json:"vxlanTunnelMACAddr,omitempty" validate:"omitempty,mac"`
	// IPv6VXLANTunnelAddr is the address of the IPv6 VXLAN tunnel.
	IPv6VXLANTunnelAddr string `json:"ipv6VXLANTunnelAddr,omitempty" validate:"omitempty,ipv6"`
	// VXLANTunnelMACAddrV6 is the MAC address of the IPv6 VXLAN tunnel.
	VXLANTunnelMACAddrV6 string `json:"vxlanTunnelMACAddrV6,omitempty" validate:"omitempty,mac"`
	// OrchRefs for this node.
	OrchRefs []OrchRef `json:"orchRefs,omitempty" validate:"omitempty"`
	// Wireguard configuration for this node.
	Wireguard *NodeWireguardSpec `json:"wireguard,omitempty" validate:"omitempty"`
	// Addresses list address that a client can reach the node at.
	Addresses []NodeAddress `json:"addresses,omitempty" validate:"omitempty"`
}

// NodeAddress represents an address assigned to a node.
type NodeAddress struct {
	// Address is a string representation of the actual address.
	Address string `json:"address" validate:"net"`
	// Type is the node IP type
	Type string `json:"type,omitempty" validate:"omitempty,ipType"`
}

// NodeStatus contains the status of a Node resource.
type NodeStatus struct {
	// WireguardPublicKey is the IPv4 Wireguard public-key for this node.
	// wireguardPublicKey validates if the string is a valid base64 encoded key.
	WireguardPublicKey string `json:"wireguardPublicKey,omitempty" validate:"omitempty,wireguardPublicKey"`
	// WireguardPublicKeyV6 is the IPv6 Wireguard public-key for this node.
	// wireguardPublicKey validates if the string is a valid base64 encoded key.
	WireguardPublicKeyV6 string `json:"wireguardPublicKeyV6,omitempty" validate:"omitempty,wireguardPublicKey"`
	// PodCIDR is a reflection of the Kubernetes node's spec.PodCIDRs field.
	PodCIDRs []string `json:"podCIDRs,omitempty" validate:"omitempty"`
}

// OrchRef is used to correlate a Calico node to its corresponding representation in a given orchestrator
type OrchRef struct {
	// NodeName represents the name for this node according to the orchestrator.
	NodeName string `json:"nodeName,omitempty" validate:"omitempty"`
	// Orchestrator represents the orchestrator using this node.
	Orchestrator string `json:"orchestrator"`
}

// NodeBGPSpec contains the specification for the Node BGP configuration.
type NodeBGPSpec struct {
	// The AS Number of the node. If this is not specified, the global
	// default value will be used.
	ASNumber *numorstring.ASNumber `json:"asNumber,omitempty"`
	// IPv4Address is the IPv4 address and network of this node. The IPv4 address
	// should always be specified if you are using BGP.
	IPv4Address string `json:"ipv4Address,omitempty" validate:"omitempty,cidrv4"`
	// IPv6Address is the IPv6 address and network of this node. Not required if you
	// are not using BGP or you do not require IPv6 routing.
	IPv6Address string `json:"ipv6Address,omitempty" validate:"omitempty,cidrv6"`
	// IPv4IPIPTunnelAddr is the IPv4 address of the IP in IP tunnel.
	IPv4IPIPTunnelAddr string `json:"ipv4IPIPTunnelAddr,omitempty" validate:"omitempty,ipv4"`
	// RouteReflectorClusterID enables this node as a route reflector within the given
	// cluster.
	RouteReflectorClusterID string `json:"routeReflectorClusterID,omitempty" validate:"omitempty,ipv4"`
}

// NodeWireguardSpec contains the specification for the Node wireguard configuration.
type NodeWireguardSpec struct {
	// InterfaceIPv4Address is the IP address for the IPv4 Wireguard interface.
	InterfaceIPv4Address string `json:"interfaceIPv4Address,omitempty" validate:"omitempty,ipv4"`
	// InterfaceIPv6Address is the IP address for the IPv6 Wireguard interface.
	InterfaceIPv6Address string `json:"interfaceIPv6Address,omitempty" validate:"omitempty,ipv6"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeList contains a list of Node resources.
type NodeList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items           []Node `json:"items"`
}
// NewNode returns a zero-valued Node whose TypeMeta is pre-populated for the
// current API group/version.
func NewNode() *Node {
	n := &Node{}
	n.TypeMeta = metav1.TypeMeta{
		Kind:       KindNode,
		APIVersion: apiv3.GroupVersionCurrent,
	}
	return n
}

// NewNodeList returns a zero-valued NodeList whose TypeMeta is pre-populated
// for the current API group/version.
func NewNodeList() *NodeList {
	l := &NodeList{}
	l.TypeMeta = metav1.TypeMeta{
		Kind:       KindNodeList,
		APIVersion: apiv3.GroupVersionCurrent,
	}
	return l
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,45 @@
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// SchemeGroupVersion is the group/version under which these types are stored
// as custom resources: crd.projectcalico.org/v1.
var SchemeGroupVersion = schema.GroupVersion{Group: "crd.projectcalico.org", Version: "v1"}

var (
	// SchemeBuilder collects functions that add types to a scheme.
	SchemeBuilder runtime.SchemeBuilder
	// localSchemeBuilder lets generated and hand-written code register into
	// the same builder.
	localSchemeBuilder = &SchemeBuilder
	// AddToScheme applies all registered functions to a scheme.
	AddToScheme = localSchemeBuilder.AddToScheme
)

func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addKnownTypes)
}
// Resource takes an unqualified resource name and returns a GroupResource
// qualified with this package's API group.
func Resource(resource string) schema.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}
// addKnownTypes is registered with the SchemeBuilder to add this package's
// types to a scheme.
// NOTE(review): as written it registers nothing and always returns nil —
// presumably the registrations were stripped in this vendored copy; confirm
// against the upstream file before relying on AddToScheme doing anything.
func addKnownTypes(scheme *runtime.Scheme) error {
	return nil
}

View File

@@ -0,0 +1,134 @@
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/api/pkg/lib/numorstring"
)
// Kind names used in the TypeMeta of WorkloadEndpoint resources.
const (
	KindWorkloadEndpoint     = "WorkloadEndpoint"
	KindWorkloadEndpointList = "WorkloadEndpointList"
)

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// WorkloadEndpoint contains information about a WorkloadEndpoint resource that is a peer of a Calico
// compute node.
type WorkloadEndpoint struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Specification of the WorkloadEndpoint.
	Spec WorkloadEndpointSpec `json:"spec,omitempty"`
}
// WorkloadEndpointSpec contains the specification for a WorkloadEndpoint resource.
type WorkloadEndpointSpec struct {
	// The name of the orchestrator.
	Orchestrator string `json:"orchestrator,omitempty" validate:"omitempty,name"`
	// The name of the workload.
	Workload string `json:"workload,omitempty" validate:"omitempty,name"`
	// The node name identifying the Calico node instance.
	Node string `json:"node,omitempty" validate:"omitempty,name"`
	// The container ID.
	ContainerID string `json:"containerID,omitempty" validate:"omitempty,containerID"`
	// The Pod name.
	Pod string `json:"pod,omitempty" validate:"omitempty,name"`
	// The Endpoint name.
	Endpoint string `json:"endpoint,omitempty" validate:"omitempty,name"`
	// ServiceAccountName, if specified, is the name of the k8s ServiceAccount for this pod.
	ServiceAccountName string `json:"serviceAccountName,omitempty" validate:"omitempty,name"`
	// IPNetworks is a list of subnets allocated to this endpoint. IP packets will only be
	// allowed to leave this interface if they come from an address in one of these subnets.
	// Currently only /32 for IPv4 and /128 for IPv6 networks are supported.
	IPNetworks []string `json:"ipNetworks,omitempty" validate:"omitempty,dive,net"`
	// IPNATs is a list of 1:1 NAT mappings to apply to the endpoint. Inbound connections
	// to the external IP will be forwarded to the internal IP. Connections initiated from the
	// internal IP will not have their source address changed, except when an endpoint attempts
	// to connect one of its own external IPs. Each internal IP must be associated with the same
	// endpoint via the configured IPNetworks.
	IPNATs []IPNAT `json:"ipNATs,omitempty" validate:"omitempty,dive"`
	// IPv4Gateway is the gateway IPv4 address for traffic from the workload.
	IPv4Gateway string `json:"ipv4Gateway,omitempty" validate:"omitempty,ipv4"`
	// IPv6Gateway is the gateway IPv6 address for traffic from the workload.
	IPv6Gateway string `json:"ipv6Gateway,omitempty" validate:"omitempty,ipv6"`
	// A list of security Profile resources that apply to this endpoint. Each profile is
	// applied in the order that they appear in this list. Profile rules are applied
	// after the selector-based security policy.
	Profiles []string `json:"profiles,omitempty" validate:"omitempty,dive,name"`
	// InterfaceName the name of the Linux interface on the host: for example, tap80.
	InterfaceName string `json:"interfaceName,omitempty" validate:"interface"`
	// MAC is the MAC address of the endpoint interface.
	MAC string `json:"mac,omitempty" validate:"omitempty,mac"`
	// Ports contains the endpoint's named ports, which may be referenced in security policy rules.
	Ports []WorkloadEndpointPort `json:"ports,omitempty" validate:"dive,omitempty"`
	// AllowSpoofedSourcePrefixes is a list of CIDRs that the endpoint should be able to send traffic from,
	// bypassing the RPF check.
	AllowSpoofedSourcePrefixes []string `json:"allowSpoofedSourcePrefixes,omitempty" validate:"omitempty,dive,cidr"`
}
// WorkloadEndpointPort represents one endpoint's named or mapped port.
type WorkloadEndpointPort struct {
	// Name is the optional name used to refer to this port from policy rules.
	Name     string               `json:"name" validate:"omitempty,portName"`
	Protocol numorstring.Protocol `json:"protocol"`
	// Port must be greater than zero (per the validate tag).
	Port uint16 `json:"port" validate:"gt=0"`
	// HostPort and HostIP describe the host-side mapping, if any, for this port.
	HostPort uint16 `json:"hostPort"`
	HostIP   string `json:"hostIP" validate:"omitempty,net"`
}
// IPNAT contains a single NAT mapping for a WorkloadEndpoint resource.
type IPNAT struct {
	// The internal IP address which must be associated with the owning endpoint via the
	// configured IPNetworks for the endpoint.
	InternalIP string `json:"internalIP" validate:"omitempty,ip"`
	// The external IP address.
	ExternalIP string `json:"externalIP" validate:"omitempty,ip"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// WorkloadEndpointList contains a list of WorkloadEndpoint resources.
type WorkloadEndpointList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items           []WorkloadEndpoint `json:"items"`
}
// NewWorkloadEndpoint creates a new (zeroed) WorkloadEndpoint struct with the TypeMetadata initialised to the current
// version.
func NewWorkloadEndpoint() *WorkloadEndpoint {
	wep := &WorkloadEndpoint{}
	wep.TypeMeta.Kind = KindWorkloadEndpoint
	wep.TypeMeta.APIVersion = apiv3.GroupVersionCurrent
	return wep
}
// NewWorkloadEndpointList creates a new (zeroed) WorkloadEndpointList struct with the TypeMetadata initialised to the current
// version.
func NewWorkloadEndpointList() *WorkloadEndpointList {
	list := &WorkloadEndpointList{}
	list.TypeMeta.Kind = KindWorkloadEndpointList
	list.TypeMeta.APIVersion = apiv3.GroupVersionCurrent
	return list
}

View File

@@ -0,0 +1,720 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by deepcopy-gen. DO NOT EDIT.
package v3
import (
numorstring "github.com/projectcalico/api/pkg/lib/numorstring"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// Deep-copy helpers for AllocationAttribute. Generated by deepcopy-gen (see the
// file header); do not hand-edit — regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllocationAttribute) DeepCopyInto(out *AllocationAttribute) {
	*out = *in
	if in.AttrPrimary != nil {
		in, out := &in.AttrPrimary, &out.AttrPrimary
		*out = new(string)
		**out = **in
	}
	if in.AttrSecondary != nil {
		in, out := &in.AttrSecondary, &out.AttrSecondary
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationAttribute.
func (in *AllocationAttribute) DeepCopy() *AllocationAttribute {
	if in == nil {
		return nil
	}
	out := new(AllocationAttribute)
	in.DeepCopyInto(out)
	return out
}
// Deep-copy helpers for BlockAffinity, BlockAffinityList and BlockAffinitySpec.
// Generated by deepcopy-gen (see the file header); do not hand-edit — regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BlockAffinity) DeepCopyInto(out *BlockAffinity) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockAffinity.
func (in *BlockAffinity) DeepCopy() *BlockAffinity {
	if in == nil {
		return nil
	}
	out := new(BlockAffinity)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BlockAffinity) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BlockAffinityList) DeepCopyInto(out *BlockAffinityList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]BlockAffinity, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockAffinityList.
func (in *BlockAffinityList) DeepCopy() *BlockAffinityList {
	if in == nil {
		return nil
	}
	out := new(BlockAffinityList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BlockAffinityList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BlockAffinitySpec) DeepCopyInto(out *BlockAffinitySpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockAffinitySpec.
func (in *BlockAffinitySpec) DeepCopy() *BlockAffinitySpec {
	if in == nil {
		return nil
	}
	out := new(BlockAffinitySpec)
	in.DeepCopyInto(out)
	return out
}
// Deep-copy helpers for IPAMBlock, IPAMBlockList and IPAMBlockSpec.
// Generated by deepcopy-gen (see the file header); do not hand-edit — regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMBlock) DeepCopyInto(out *IPAMBlock) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMBlock.
func (in *IPAMBlock) DeepCopy() *IPAMBlock {
	if in == nil {
		return nil
	}
	out := new(IPAMBlock)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAMBlock) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMBlockList) DeepCopyInto(out *IPAMBlockList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]IPAMBlock, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMBlockList.
func (in *IPAMBlockList) DeepCopy() *IPAMBlockList {
	if in == nil {
		return nil
	}
	out := new(IPAMBlockList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAMBlockList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMBlockSpec) DeepCopyInto(out *IPAMBlockSpec) {
	*out = *in
	if in.Affinity != nil {
		in, out := &in.Affinity, &out.Affinity
		*out = new(string)
		**out = **in
	}
	if in.Allocations != nil {
		in, out := &in.Allocations, &out.Allocations
		*out = make([]*int, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(int)
				**out = **in
			}
		}
	}
	if in.Unallocated != nil {
		in, out := &in.Unallocated, &out.Unallocated
		*out = make([]int, len(*in))
		copy(*out, *in)
	}
	if in.Attributes != nil {
		in, out := &in.Attributes, &out.Attributes
		*out = make([]AllocationAttribute, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SequenceNumberForAllocation != nil {
		in, out := &in.SequenceNumberForAllocation, &out.SequenceNumberForAllocation
		*out = make(map[string]uint64, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMBlockSpec.
func (in *IPAMBlockSpec) DeepCopy() *IPAMBlockSpec {
	if in == nil {
		return nil
	}
	out := new(IPAMBlockSpec)
	in.DeepCopyInto(out)
	return out
}
// Deep-copy helpers for IPAMConfig, IPAMConfigList and IPAMConfigSpec.
// Generated by deepcopy-gen (see the file header); do not hand-edit — regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMConfig) DeepCopyInto(out *IPAMConfig) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfig.
func (in *IPAMConfig) DeepCopy() *IPAMConfig {
	if in == nil {
		return nil
	}
	out := new(IPAMConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAMConfig) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMConfigList) DeepCopyInto(out *IPAMConfigList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]IPAMConfig, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfigList.
func (in *IPAMConfigList) DeepCopy() *IPAMConfigList {
	if in == nil {
		return nil
	}
	out := new(IPAMConfigList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAMConfigList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMConfigSpec) DeepCopyInto(out *IPAMConfigSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfigSpec.
func (in *IPAMConfigSpec) DeepCopy() *IPAMConfigSpec {
	if in == nil {
		return nil
	}
	out := new(IPAMConfigSpec)
	in.DeepCopyInto(out)
	return out
}
// Deep-copy helpers for IPAMHandle, IPAMHandleList and IPAMHandleSpec.
// Generated by deepcopy-gen (see the file header); do not hand-edit — regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMHandle) DeepCopyInto(out *IPAMHandle) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMHandle.
func (in *IPAMHandle) DeepCopy() *IPAMHandle {
	if in == nil {
		return nil
	}
	out := new(IPAMHandle)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAMHandle) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMHandleList) DeepCopyInto(out *IPAMHandleList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]IPAMHandle, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMHandleList.
func (in *IPAMHandleList) DeepCopy() *IPAMHandleList {
	if in == nil {
		return nil
	}
	out := new(IPAMHandleList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAMHandleList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMHandleSpec) DeepCopyInto(out *IPAMHandleSpec) {
	*out = *in
	if in.Block != nil {
		in, out := &in.Block, &out.Block
		*out = make(map[string]int, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMHandleSpec.
func (in *IPAMHandleSpec) DeepCopy() *IPAMHandleSpec {
	if in == nil {
		return nil
	}
	out := new(IPAMHandleSpec)
	in.DeepCopyInto(out)
	return out
}
// Deep-copy helpers for IPNAT. Generated by deepcopy-gen (see the file header);
// do not hand-edit — regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPNAT) DeepCopyInto(out *IPNAT) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPNAT.
func (in *IPNAT) DeepCopy() *IPNAT {
	if in == nil {
		return nil
	}
	out := new(IPNAT)
	in.DeepCopyInto(out)
	return out
}
// Deep-copy helpers for Node and its related types (NodeAddress, NodeBGPSpec,
// NodeList, NodeSpec, NodeStatus, NodeWireguardSpec). Generated by deepcopy-gen
// (see the file header); do not hand-edit — regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Node) DeepCopyInto(out *Node) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node.
func (in *Node) DeepCopy() *Node {
	if in == nil {
		return nil
	}
	out := new(Node)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Node) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress.
func (in *NodeAddress) DeepCopy() *NodeAddress {
	if in == nil {
		return nil
	}
	out := new(NodeAddress)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeBGPSpec) DeepCopyInto(out *NodeBGPSpec) {
	*out = *in
	if in.ASNumber != nil {
		in, out := &in.ASNumber, &out.ASNumber
		*out = new(numorstring.ASNumber)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeBGPSpec.
func (in *NodeBGPSpec) DeepCopy() *NodeBGPSpec {
	if in == nil {
		return nil
	}
	out := new(NodeBGPSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeList) DeepCopyInto(out *NodeList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Node, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList.
func (in *NodeList) DeepCopy() *NodeList {
	if in == nil {
		return nil
	}
	out := new(NodeList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
	*out = *in
	if in.BGP != nil {
		in, out := &in.BGP, &out.BGP
		*out = new(NodeBGPSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.OrchRefs != nil {
		in, out := &in.OrchRefs, &out.OrchRefs
		*out = make([]OrchRef, len(*in))
		copy(*out, *in)
	}
	if in.Wireguard != nil {
		in, out := &in.Wireguard, &out.Wireguard
		*out = new(NodeWireguardSpec)
		**out = **in
	}
	if in.Addresses != nil {
		in, out := &in.Addresses, &out.Addresses
		*out = make([]NodeAddress, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
func (in *NodeSpec) DeepCopy() *NodeSpec {
	if in == nil {
		return nil
	}
	out := new(NodeSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
	*out = *in
	if in.PodCIDRs != nil {
		in, out := &in.PodCIDRs, &out.PodCIDRs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
func (in *NodeStatus) DeepCopy() *NodeStatus {
	if in == nil {
		return nil
	}
	out := new(NodeStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeWireguardSpec) DeepCopyInto(out *NodeWireguardSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeWireguardSpec.
func (in *NodeWireguardSpec) DeepCopy() *NodeWireguardSpec {
	if in == nil {
		return nil
	}
	out := new(NodeWireguardSpec)
	in.DeepCopyInto(out)
	return out
}
// Deep-copy helpers for OrchRef. Generated by deepcopy-gen (see the file header);
// do not hand-edit — regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OrchRef) DeepCopyInto(out *OrchRef) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchRef.
func (in *OrchRef) DeepCopy() *OrchRef {
	if in == nil {
		return nil
	}
	out := new(OrchRef)
	in.DeepCopyInto(out)
	return out
}
// Deep-copy helpers for WorkloadEndpoint, WorkloadEndpointList,
// WorkloadEndpointPort and WorkloadEndpointSpec. Generated by deepcopy-gen
// (see the file header); do not hand-edit — regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadEndpoint) DeepCopyInto(out *WorkloadEndpoint) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadEndpoint.
func (in *WorkloadEndpoint) DeepCopy() *WorkloadEndpoint {
	if in == nil {
		return nil
	}
	out := new(WorkloadEndpoint)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadEndpoint) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadEndpointList) DeepCopyInto(out *WorkloadEndpointList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]WorkloadEndpoint, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadEndpointList.
func (in *WorkloadEndpointList) DeepCopy() *WorkloadEndpointList {
	if in == nil {
		return nil
	}
	out := new(WorkloadEndpointList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadEndpointList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadEndpointPort) DeepCopyInto(out *WorkloadEndpointPort) {
	*out = *in
	out.Protocol = in.Protocol
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadEndpointPort.
func (in *WorkloadEndpointPort) DeepCopy() *WorkloadEndpointPort {
	if in == nil {
		return nil
	}
	out := new(WorkloadEndpointPort)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadEndpointSpec) DeepCopyInto(out *WorkloadEndpointSpec) {
	*out = *in
	if in.IPNetworks != nil {
		in, out := &in.IPNetworks, &out.IPNetworks
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.IPNATs != nil {
		in, out := &in.IPNATs, &out.IPNATs
		*out = make([]IPNAT, len(*in))
		copy(*out, *in)
	}
	if in.Profiles != nil {
		in, out := &in.Profiles, &out.Profiles
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]WorkloadEndpointPort, len(*in))
		copy(*out, *in)
	}
	if in.AllowSpoofedSourcePrefixes != nil {
		in, out := &in.AllowSpoofedSourcePrefixes, &out.AllowSpoofedSourcePrefixes
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadEndpointSpec.
func (in *WorkloadEndpointSpec) DeepCopy() *WorkloadEndpointSpec {
	if in == nil {
		return nil
	}
	out := new(WorkloadEndpointSpec)
	in.DeepCopyInto(out)
	return out
}

View File

@@ -0,0 +1,18 @@
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package encap implements a field type that represent different encap modes.
*/
package encap

View File

@@ -0,0 +1,25 @@
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encap
// Mode represents an encapsulation mode.
type Mode string

const (
	// Undefined means no encapsulation mode has been set.
	Undefined Mode = ""
	// NOTE(review): Always and CrossSubnet are untyped string constants, not Mode
	// (only Undefined carries the type). They remain assignable to Mode, but confirm
	// no caller relies on assigning them to plain strings before tightening the types.
	Always      = "always"
	CrossSubnet = "cross-subnet"
)

// DefaultMode is the encapsulation mode used when none is specified.
const DefaultMode = Always

View File

@@ -0,0 +1,47 @@
// Copyright (c) 2017-2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package conversion
// Prefixes and annotation keys used when converting Kubernetes resources
// (namespaces, service accounts, network policies, pods) into Calico resources.
const (
	NamespaceLabelPrefix            = "pcns."
	NamespaceProfileNamePrefix      = "kns."
	K8sNetworkPolicyNamePrefix      = "knp.default."
	ServiceAccountLabelPrefix       = "pcsa."
	ServiceAccountProfileNamePrefix = "ksa."

	// AnnotationPodIP is an annotation we apply to pods when assigning them an IP. It
	// duplicates the value of the Pod.Status.PodIP field, which is set by kubelet but,
	// since we write it ourselves, we can make sure that it is written synchronously
	// and quickly.
	//
	// We set this annotation to the empty string when the WEP is deleted by the CNI plugin.
	// That signals that the IP no longer belongs to this pod.
	AnnotationPodIP = "cni.projectcalico.org/podIP"
	// AnnotationPodIPs is similar for the plural PodIPs field.
	AnnotationPodIPs = "cni.projectcalico.org/podIPs"
	// AnnotationAWSPodIPs is the annotation set by the Amazon VPC CNI plugin.
	AnnotationAWSPodIPs = "vpc.amazonaws.com/pod-ips"
	// AnnotationContainerID stores the container ID of the pod. This allows us to disambiguate different pods
	// that have the same name and namespace. For example, stateful set pod that is restarted. May be missing
	// on older Pods.
	AnnotationContainerID = "cni.projectcalico.org/containerID"
	// NameLabel is a label that can be used to match a serviceaccount or namespace
	// name exactly.
	NameLabel = "projectcalico.org/name"
)

View File

@@ -0,0 +1,811 @@
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package conversion
import (
"fmt"
"sort"
"strings"
log "github.com/sirupsen/logrus"
kapiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
discovery "k8s.io/api/discovery/v1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/util/intstr"
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/api/pkg/lib/numorstring"
"github.com/projectcalico/calico/libcalico-go/lib/backend/model"
cerrors "github.com/projectcalico/calico/libcalico-go/lib/errors"
"github.com/projectcalico/calico/libcalico-go/lib/names"
cnet "github.com/projectcalico/calico/libcalico-go/lib/net"
)
var (
	// protoTCP is a reusable copy of the Kubernetes TCP protocol value, kept as a
	// variable so its address can be taken where a *kapiv1.Protocol is required.
	protoTCP = kapiv1.ProtocolTCP
)
// selectorType distinguishes which kind of Kubernetes selector is being converted.
type selectorType int8

const (
	SelectorNamespace selectorType = iota
	SelectorPod
)
// Converter is the contract for translating Kubernetes resources into Calico
// backend model objects (and back, for the name/revision helpers). It embeds
// WorkloadEndpointConverter for pod-to-WorkloadEndpoint conversion.
type Converter interface {
	WorkloadEndpointConverter
	// ParseWorkloadEndpointName extracts the identifiers encoded in a
	// WorkloadEndpoint name (see the implementation's comment for the format).
	ParseWorkloadEndpointName(workloadName string) (names.WorkloadEndpointIdentifiers, error)
	// NamespaceToProfile converts a Namespace into a Calico Profile KVPair.
	NamespaceToProfile(ns *kapiv1.Namespace) (*model.KVPair, error)
	// Pod classification predicates used to decide whether/when a pod is
	// represented as a Calico WorkloadEndpoint.
	IsValidCalicoWorkloadEndpoint(pod *kapiv1.Pod) bool
	IsReadyCalicoPod(pod *kapiv1.Pod) bool
	IsScheduled(pod *kapiv1.Pod) bool
	IsHostNetworked(pod *kapiv1.Pod) bool
	HasIPAddress(pod *kapiv1.Pod) bool
	StagedKubernetesNetworkPolicyToStagedName(stagedK8sName string) string
	// K8sNetworkPolicyToCalico converts a Kubernetes NetworkPolicy to its Calico equivalent.
	K8sNetworkPolicyToCalico(np *networkingv1.NetworkPolicy) (*model.KVPair, error)
	EndpointSliceToKVP(svc *discovery.EndpointSlice) (*model.KVPair, error)
	ServiceToKVP(service *kapiv1.Service) (*model.KVPair, error)
	ProfileNameToNamespace(profileName string) (string, error)
	ServiceAccountToProfile(sa *kapiv1.ServiceAccount) (*model.KVPair, error)
	ProfileNameToServiceAccount(profileName string) (ns, sa string, err error)
	// JoinProfileRevisions/SplitProfileRevision combine and separate the namespace
	// and service-account resource versions carried in a profile revision string.
	JoinProfileRevisions(nsRev, saRev string) string
	SplitProfileRevision(rev string) (nsRev string, saRev string, err error)
}
// converter is the default Converter implementation; it embeds a
// WorkloadEndpointConverter to satisfy that part of the interface.
type converter struct {
	WorkloadEndpointConverter
}
// NewConverter returns a Converter backed by the default
// WorkloadEndpointConverter implementation.
func NewConverter() Converter {
	c := converter{WorkloadEndpointConverter: NewWorkloadEndpointConverter()}
	return &c
}
// ParseWorkloadEndpointName extracts the Node name, Orchestrator, Pod name and endpoint from the
// given WorkloadEndpoint name.
// The expected format for k8s is <node>-k8s-<pod>-<endpoint>.
// This is a thin delegation to the names package, which owns the format.
func (c converter) ParseWorkloadEndpointName(workloadName string) (names.WorkloadEndpointIdentifiers, error) {
	return names.ParseWorkloadEndpointName(workloadName)
}
// NamespaceToProfile converts a Namespace to a Calico Profile. The Profile stores
// labels from the Namespace which are inherited by the WorkloadEndpoints within
// the Profile. This Profile also has the default ingress and egress rules, which are both 'allow'.
func (c converter) NamespaceToProfile(ns *kapiv1.Namespace) (*model.KVPair, error) {
	// Copy the namespace's labels across with a special prefix so they are
	// recognisable as having been inherited from the parent Kubernetes Namespace.
	inherited := make(map[string]string, len(ns.Labels)+1)
	for key, value := range ns.Labels {
		inherited[NamespaceLabelPrefix+key] = value
	}
	// Record the namespace's own name as a label too, so that exact namespace
	// matching by name works within a namespaceSelector.
	inherited[NamespaceLabelPrefix+NameLabel] = ns.Name

	// Build the profile object itself.
	profileName := NamespaceProfileNamePrefix + ns.Name
	profile := apiv3.NewProfile()
	profile.ObjectMeta = metav1.ObjectMeta{
		Name:              profileName,
		CreationTimestamp: ns.CreationTimestamp,
		UID:               ns.UID,
	}
	profile.Spec = apiv3.ProfileSpec{
		Ingress:       []apiv3.Rule{{Action: apiv3.Allow}},
		Egress:        []apiv3.Rule{{Action: apiv3.Allow}},
		LabelsToApply: inherited,
	}

	// Wrap the profile in a KVPair for the backend; the revision carries the
	// namespace's resource version (the service-account half is empty here).
	return &model.KVPair{
		Key: model.ResourceKey{
			Name: profileName,
			Kind: apiv3.KindProfile,
		},
		Value:    profile,
		Revision: c.JoinProfileRevisions(ns.ResourceVersion, ""),
	}, nil
}
// IsValidCalicoWorkloadEndpoint returns true if the pod should be shown as a workloadEndpoint
// in the Calico API and false otherwise. Note: since we completely ignore notifications for
// invalid Pods, it is important that pods can only transition from not-valid to valid and not
// the other way. If they transition from valid to invalid, we'll fail to emit a deletion
// event in the watcher.
func (c converter) IsValidCalicoWorkloadEndpoint(pod *kapiv1.Pod) bool {
	switch {
	case c.IsHostNetworked(pod):
		log.WithField("pod", pod.Name).Debug("Pod is host networked.")
		return false
	case !c.IsScheduled(pod):
		log.WithField("pod", pod.Name).Debug("Pod is not scheduled.")
		return false
	}
	return true
}
// IsReadyCalicoPod returns true if the pod is a valid Calico WorkloadEndpoint and has
// an IP address assigned (i.e. it's ready for Calico networking).
func (c converter) IsReadyCalicoPod(pod *kapiv1.Pod) bool {
	if !c.IsValidCalicoWorkloadEndpoint(pod) {
		return false
	}
	if !c.HasIPAddress(pod) {
		log.WithField("pod", pod.Name).Debug("Pod does not have an IP address.")
		return false
	}
	return true
}
const (
	// podCompleted is a pod phase that is documented but doesn't seem to be among the
	// kapiv1 API constants (maybe it's from an older version of the API); treating it
	// as terminal, alongside Failed/Succeeded, should be safe.
	podCompleted kapiv1.PodPhase = "Completed"
)
// IsFinished returns true if the pod is done running: either it has reached a terminal phase
// (Failed/Succeeded/Completed), or it is being deleted and its CNI plugin has signalled, by
// emptying the relevant IP annotation, that the pod's IP has already been released.
func IsFinished(pod *kapiv1.Pod) bool {
	if pod.DeletionTimestamp != nil {
		// Pod is being deleted but it may still be in its termination grace period. If Calico CNI
		// was used, then we use AnnotationPodIP to signal the moment that the pod actually loses its
		// IP by setting the annotation to "". (Otherwise, just fall back on the status of the pod.)
		if ip, ok := pod.Annotations[AnnotationPodIP]; ok && ip == "" {
			// AnnotationPodIP is explicitly set to empty string, Calico CNI has removed the network
			// from the pod.
			log.Debug("Pod is being deleted and IPs have been removed by Calico CNI.")
			return true
		} else if ips, ok := pod.Annotations[AnnotationAWSPodIPs]; ok && ips == "" {
			// AnnotationAWSPodIPs is explicitly set to empty string, AWS CNI has removed the network
			// from the pod.
			log.Debug("Pod is being deleted and IPs have been removed by AWS CNI.")
			return true
		}
	}
	// Terminal phases mean the pod is finished regardless of deletion state.
	switch pod.Status.Phase {
	case kapiv1.PodFailed, kapiv1.PodSucceeded, podCompleted:
		log.Debug("Pod phase is failed/succeeded/completed.")
		return true
	}
	return false
}
// IsScheduled returns true if the pod has been assigned to a node.
func (c converter) IsScheduled(pod *kapiv1.Pod) bool {
	return pod.Spec.NodeName != ""
}
// IsHostNetworked returns true if the pod runs in the host's network namespace.
func (c converter) IsHostNetworked(pod *kapiv1.Pod) bool {
	return pod.Spec.HostNetwork
}
// HasIPAddress returns true if the pod has an IP recorded either in its status or in one of
// the CNI IP annotations (Calico singular or AWS VPC CNI).
func (c converter) HasIPAddress(pod *kapiv1.Pod) bool {
	return pod.Status.PodIP != "" || pod.Annotations[AnnotationPodIP] != "" || pod.Annotations[AnnotationAWSPodIPs] != ""
	// Note: we don't need to check PodIPs and AnnotationPodIPs here, because those cannot be
	// non-empty if the corresponding singular field is empty.
}
// getPodIPs extracts the IP addresses from a Kubernetes Pod. We support a single IPv4 address
// and/or a single IPv6. getPodIPs loads the IPs either from the PodIPs and PodIP field, if
// present, or the calico podIP annotation. Returns (nil, nil) when the pod has no IP at all;
// returns an error only if an IP was present but unparseable.
func getPodIPs(pod *kapiv1.Pod) ([]*cnet.IPNet, error) {
	logc := log.WithFields(log.Fields{"pod": pod.Name, "namespace": pod.Namespace})
	var podIPs []string
	// Sources are checked in priority order: status plural, status singular, then the
	// Calico plural/singular annotations, then the AWS VPC CNI annotation.
	if ips := pod.Status.PodIPs; len(ips) != 0 {
		logc.WithField("ips", ips).Debug("PodIPs field filled in")
		for _, ip := range ips {
			podIPs = append(podIPs, ip.IP)
		}
	} else if ip := pod.Status.PodIP; ip != "" {
		logc.WithField("ip", ip).Debug("PodIP field filled in")
		podIPs = append(podIPs, ip)
	} else if ips := pod.Annotations[AnnotationPodIPs]; ips != "" {
		logc.WithField("ips", ips).Debug("No PodStatus IPs, use Calico plural annotation")
		podIPs = append(podIPs, strings.Split(ips, ",")...)
	} else if ip := pod.Annotations[AnnotationPodIP]; ip != "" {
		logc.WithField("ip", ip).Debug("No PodStatus IPs, use Calico singular annotation")
		podIPs = append(podIPs, ip)
	} else if ips := pod.Annotations[AnnotationAWSPodIPs]; ips != "" {
		logc.WithField("ips", ips).Debug("No PodStatus IPs, use AWS VPC annotation")
		podIPs = append(podIPs, strings.Split(ips, ",")...)
	} else {
		logc.Debug("Pod has no IP")
		return nil, nil
	}
	// Parse each collected string into a single-address IPNet.
	var podIPNets []*cnet.IPNet
	for _, ip := range podIPs {
		_, ipNet, err := cnet.ParseCIDROrIP(ip)
		if err != nil {
			logc.WithFields(log.Fields{"ip": ip}).WithError(err).Error("Failed to parse pod IP")
			return nil, err
		}
		podIPNets = append(podIPNets, ipNet)
	}
	return podIPNets, nil
}
// StagedKubernetesNetworkPolicyToStagedName converts a StagedKubernetesNetworkPolicy name into
// a StagedNetworkPolicy name by prepending the Kubernetes NetworkPolicy name prefix.
func (c converter) StagedKubernetesNetworkPolicyToStagedName(stagedK8sName string) string {
	// Plain concatenation: the previous fmt.Sprintf(K8sNetworkPolicyNamePrefix+stagedK8sName)
	// treated the joined name as a format string, so any '%' in a policy name would have been
	// mangled (go vet also flags non-constant format strings).
	return K8sNetworkPolicyNamePrefix + stagedK8sName
}
// EndpointSliceToKVP converts a k8s EndpointSlice to a model.KVPair. The slice is deep-copied
// so the KVPair's value does not alias the (possibly shared) input object.
func (c converter) EndpointSliceToKVP(slice *discovery.EndpointSlice) (*model.KVPair, error) {
	return &model.KVPair{
		Key: model.ResourceKey{
			Name:      slice.Name,
			Namespace: slice.Namespace,
			Kind:      model.KindKubernetesEndpointSlice,
		},
		Value:    slice.DeepCopy(),
		Revision: slice.ResourceVersion,
	}, nil
}
// ServiceToKVP converts a k8s Service to a model.KVPair. The service is deep-copied so the
// KVPair's value does not alias the (possibly shared) input object.
func (c converter) ServiceToKVP(service *kapiv1.Service) (*model.KVPair, error) {
	return &model.KVPair{
		Key: model.ResourceKey{
			Name:      service.Name,
			Namespace: service.Namespace,
			Kind:      model.KindKubernetesService,
		},
		Value:    service.DeepCopy(),
		Revision: service.ResourceVersion,
	}, nil
}
// K8sNetworkPolicyToCalico converts a k8s NetworkPolicy to a model.KVPair holding the
// equivalent Calico NetworkPolicy. Rules that cannot be converted are dropped and recorded in
// the returned ErrorPolicyConversion error; the KVPair is still returned in that case so the
// caller can use the partially-converted policy.
func (c converter) K8sNetworkPolicyToCalico(np *networkingv1.NetworkPolicy) (*model.KVPair, error) {
	// Pull out important fields. Plain concatenation rather than fmt.Sprintf: the name is not a
	// format string, and a '%' in it would otherwise be mangled (go vet flags this pattern).
	policyName := K8sNetworkPolicyNamePrefix + np.Name
	// We insert all the NetworkPolicy Policies at order 1000.0 after conversion.
	// This order might change in future.
	order := float64(1000.0)
	errorTracker := cerrors.ErrorPolicyConversion{PolicyName: np.Name}
	// Generate the ingress rules list.
	var ingressRules []apiv3.Rule
	for _, r := range np.Spec.Ingress {
		rules, err := c.k8sRuleToCalico(r.From, r.Ports, np.Namespace, true)
		if err != nil {
			log.WithError(err).Warn("dropping k8s rule that couldn't be converted.")
			// Add rule to conversion error slice. Hand the tracker the address of a
			// per-iteration copy so it never aliases the reused loop variable.
			badRule := r
			errorTracker.BadIngressRule(&badRule, fmt.Sprintf("k8s rule couldn't be converted: %s", err))
		} else {
			ingressRules = append(ingressRules, rules...)
		}
	}
	// Generate the egress rules list.
	var egressRules []apiv3.Rule
	for _, r := range np.Spec.Egress {
		rules, err := c.k8sRuleToCalico(r.To, r.Ports, np.Namespace, false)
		if err != nil {
			log.WithError(err).Warn("dropping k8s rule that couldn't be converted")
			// Add rule to conversion error slice (copy, as above, to avoid aliasing).
			badRule := r
			errorTracker.BadEgressRule(&badRule, fmt.Sprintf("k8s rule couldn't be converted: %s", err))
		} else {
			egressRules = append(egressRules, rules...)
		}
	}
	// Calculate Types setting.
	ingress := false
	egress := false
	for _, policyType := range np.Spec.PolicyTypes {
		switch policyType {
		case networkingv1.PolicyTypeIngress:
			ingress = true
		case networkingv1.PolicyTypeEgress:
			egress = true
		}
	}
	types := []apiv3.PolicyType{}
	if ingress {
		types = append(types, apiv3.PolicyTypeIngress)
	}
	if egress {
		types = append(types, apiv3.PolicyTypeEgress)
	} else if len(egressRules) > 0 {
		// Egress was introduced at the same time as policyTypes. It shouldn't be possible to
		// receive a NetworkPolicy with an egress rule but without "Egress" specified in its types,
		// but we'll warn about it anyway.
		log.Warn("K8s PolicyTypes don't include 'egress', but NetworkPolicy has egress rules.")
	}
	// If no types were specified in the policy, then we're running on a cluster that doesn't
	// include support for that field in the API. In that case, the correct behavior is for the policy
	// to apply to only ingress traffic.
	if len(types) == 0 {
		types = append(types, apiv3.PolicyTypeIngress)
	}
	// Create the NetworkPolicy.
	policy := apiv3.NewNetworkPolicy()
	policy.ObjectMeta = metav1.ObjectMeta{
		Name:              policyName,
		Namespace:         np.Namespace,
		CreationTimestamp: np.CreationTimestamp,
		UID:               np.UID,
		ResourceVersion:   np.ResourceVersion,
	}
	policy.Spec = apiv3.NetworkPolicySpec{
		Order:    &order,
		Selector: c.k8sSelectorToCalico(&np.Spec.PodSelector, SelectorPod),
		Ingress:  ingressRules,
		Egress:   egressRules,
		Types:    types,
	}
	// Build the KVPair.
	kvp := &model.KVPair{
		Key: model.ResourceKey{
			Name:      policyName,
			Namespace: np.Namespace,
			Kind:      apiv3.KindNetworkPolicy,
		},
		Value:    policy,
		Revision: np.ResourceVersion,
	}
	// Return the KVPair with conversion errors if applicable
	return kvp, errorTracker.GetError()
}
// k8sSelectorToCalico takes a namespaced k8s label selector and returns the Calico
// equivalent selector expression (clauses joined with " && ").
// For SelectorPod the expression is anchored to k8s-orchestrated endpoints; for
// SelectorNamespace a present-but-empty selector maps to all().
func (c converter) k8sSelectorToCalico(s *metav1.LabelSelector, selectorType selectorType) string {
	// Only prefix pod selectors - this won't work for namespace selectors.
	selectors := []string{}
	if selectorType == SelectorPod {
		selectors = append(selectors, fmt.Sprintf("%s == 'k8s'", apiv3.LabelOrchestrator))
	}
	// A nil selector yields just the orchestrator clause (or "" for namespaces).
	if s == nil {
		return strings.Join(selectors, " && ")
	}
	// For namespace selectors, if they are present but have no terms, it means "select all
	// namespaces". We use empty string to represent the nil namespace selector, so use all() to
	// represent all namespaces.
	if selectorType == SelectorNamespace && len(s.MatchLabels) == 0 && len(s.MatchExpressions) == 0 {
		return "all()"
	}
	// matchLabels is a map key => value, it means match if (label[key] ==
	// value) for all keys. Sort the keys for deterministic output.
	keys := make([]string, 0, len(s.MatchLabels))
	for k := range s.MatchLabels {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		v := s.MatchLabels[k]
		selectors = append(selectors, fmt.Sprintf("%s == '%s'", k, v))
	}
	// matchExpressions is a list of in/notin/exists/doesnotexist tests.
	for _, e := range s.MatchExpressions {
		valueList := strings.Join(e.Values, "', '")
		// Each selector is formatted differently based on the operator.
		switch e.Operator {
		case metav1.LabelSelectorOpIn:
			selectors = append(selectors, fmt.Sprintf("%s in { '%s' }", e.Key, valueList))
		case metav1.LabelSelectorOpNotIn:
			selectors = append(selectors, fmt.Sprintf("%s not in { '%s' }", e.Key, valueList))
		case metav1.LabelSelectorOpExists:
			selectors = append(selectors, fmt.Sprintf("has(%s)", e.Key))
		case metav1.LabelSelectorOpDoesNotExist:
			selectors = append(selectors, fmt.Sprintf("! has(%s)", e.Key))
		}
	}
	return strings.Join(selectors, " && ")
}
// k8sRuleToCalico converts one k8s ingress/egress rule (peer list plus port list) into the
// equivalent list of Calico Rules. One Calico rule is produced per protocol - carrying all of
// that protocol's allowed ports - combined with each peer. ns scopes the rule's pod selectors;
// ingress chooses whether the peers become the rule Source (true) or Destination (false).
func (c converter) k8sRuleToCalico(rPeers []networkingv1.NetworkPolicyPeer, rPorts []networkingv1.NetworkPolicyPort, ns string, ingress bool) ([]apiv3.Rule, error) {
	rules := []apiv3.Rule{}
	peers := []*networkingv1.NetworkPolicyPeer{}
	ports := []*networkingv1.NetworkPolicyPort{}
	// Built up a list of the sources and a list of the destinations.
	for _, f := range rPeers {
		// We need to add a copy of the peer so all the rules don't
		// point to the same location.
		peers = append(peers, &networkingv1.NetworkPolicyPeer{
			NamespaceSelector: f.NamespaceSelector,
			PodSelector:       f.PodSelector,
			IPBlock:           f.IPBlock,
		})
	}
	for _, p := range rPorts {
		// We need to add a copy of the port so all the rules don't
		// point to the same location.
		port := networkingv1.NetworkPolicyPort{}
		if p.Port != nil {
			portval := intstr.FromString(p.Port.String())
			port.Port = &portval
		}
		if p.Protocol != nil {
			// Copy the protocol value directly; the previous
			// kapiv1.Protocol(fmt.Sprintf("%s", *p.Protocol)) round-trip was an
			// unnecessary conversion through a format string (staticcheck S1025).
			protval := *p.Protocol
			port.Protocol = &protval
		} else {
			// TCP is the implicit default (as per the definition of NetworkPolicyPort).
			// Make the default explicit here because our data-model always requires
			// the protocol to be specified if we're doing a port match.
			port.Protocol = &protoTCP
		}
		if p.EndPort != nil {
			port.EndPort = p.EndPort
		}
		ports = append(ports, &port)
	}
	// If there are no peers, or no ports, represent that as nil.
	if len(peers) == 0 {
		peers = []*networkingv1.NetworkPolicyPeer{nil}
	}
	if len(ports) == 0 {
		ports = []*networkingv1.NetworkPolicyPort{nil}
	}
	// Group the allowed ports by protocol ("" means "any protocol, any port").
	protocolPorts := map[string][]numorstring.Port{}
	for _, port := range ports {
		protocol, calicoPorts, err := c.k8sPortToCalicoFields(port)
		if err != nil {
			return nil, fmt.Errorf("failed to parse k8s port: %s", err)
		}
		if protocol == nil && calicoPorts == nil {
			// If nil, no ports were specified, or an empty port struct was provided, which we translate to allowing all.
			// We want to use a nil protocol and a nil list of ports, which will allow any destination (for ingress).
			// Given we're gonna allow all, we may as well break here and keep only this rule
			protocolPorts = map[string][]numorstring.Port{"": nil}
			break
		}
		pStr := protocol.String()
		// treat nil as 'all ports'
		if calicoPorts == nil {
			protocolPorts[pStr] = nil
		} else if _, ok := protocolPorts[pStr]; !ok || len(protocolPorts[pStr]) > 0 {
			// don't overwrite a nil (allow all ports) if present; if no ports yet for this protocol
			// or 1+ ports which aren't 'all ports', then add the present ports
			protocolPorts[pStr] = append(protocolPorts[pStr], calicoPorts...)
		}
	}
	protocols := make([]string, 0, len(protocolPorts))
	for k := range protocolPorts {
		protocols = append(protocols, k)
	}
	// Ensure deterministic output
	sort.Strings(protocols)
	// Combine destinations with sources to generate rules. We generate one rule per protocol,
	// with each rule containing all the allowed ports.
	for _, protocolStr := range protocols {
		calicoPorts := protocolPorts[protocolStr]
		calicoPorts = SimplifyPorts(calicoPorts)
		var protocol *numorstring.Protocol
		if protocolStr != "" {
			p := numorstring.ProtocolFromString(protocolStr)
			protocol = &p
		}
		for _, peer := range peers {
			selector, nsSelector, nets, notNets := c.k8sPeerToCalicoFields(peer, ns)
			if ingress {
				// Build inbound rule and append to list.
				rules = append(rules, apiv3.Rule{
					Action:   "Allow",
					Protocol: protocol,
					Source: apiv3.EntityRule{
						Selector:          selector,
						NamespaceSelector: nsSelector,
						Nets:              nets,
						NotNets:           notNets,
					},
					Destination: apiv3.EntityRule{
						Ports: calicoPorts,
					},
				})
			} else {
				// Build outbound rule and append to list.
				rules = append(rules, apiv3.Rule{
					Action:   "Allow",
					Protocol: protocol,
					Destination: apiv3.EntityRule{
						Ports:             calicoPorts,
						Selector:          selector,
						NamespaceSelector: nsSelector,
						Nets:              nets,
						NotNets:           notNets,
					},
				})
			}
		}
	}
	return rules, nil
}
// SimplifyPorts calculates a minimum set of port ranges that cover the given set of ports.
// For example, if the input was [80, 81, 82, 9090, "foo"] the output would consist of
// [80-82, 9090, "foo"] in some order. Named ports are passed through untouched; numeric
// ports/ranges are sorted, de-duplicated and coalesced into contiguous ranges.
func SimplifyPorts(ports []numorstring.Port) []numorstring.Port {
	if len(ports) <= 1 {
		return ports
	}
	var numericPorts []int
	var outputPorts []numorstring.Port
	for _, p := range ports {
		if p.PortName != "" {
			// Pass named ports through immediately, there's nothing to be done for them.
			outputPorts = append(outputPorts, p)
		} else {
			// Work with ints to avoid overflow with the uint16 port type.
			// In practice, we currently only get single ports here so this
			// loop should run exactly once.
			for i := int(p.MinPort); i <= int(p.MaxPort); i++ {
				numericPorts = append(numericPorts, i)
			}
		}
	}
	if len(numericPorts) <= 1 {
		// We have nothing to combine, short-circuit.
		return ports
	}
	// Sort the ports so it will be easy to find ranges.
	sort.Ints(numericPorts)
	// Each pass around this outer loop extracts one port range from the sorted slice
	// and it moves the slice along to the start of the next range.
	for len(numericPorts) > 0 {
		// Initialise the next range to the contain only the first port in the slice.
		firstPortInRange := numericPorts[0]
		lastPortInRange := firstPortInRange
		// Scan ahead, looking for ports that can be combined into this range.
		numericPorts = numericPorts[1:]
		for len(numericPorts) > 0 {
			nextPort := numericPorts[0]
			if nextPort > lastPortInRange+1 {
				// This port can't be coalesced with the existing range, break out so
				// that we record the range; then we'll loop again and pick up this
				// port as the start of a new range.
				break
			}
			// The next port is either equal to the last port (due to a duplicate port
			// in the input) or it is exactly one greater. Extend the range to include
			// it.
			lastPortInRange = nextPort
			numericPorts = numericPorts[1:]
		}
		// Record the port.
		outputPorts = appendPortRange(outputPorts, firstPortInRange, lastPortInRange)
	}
	return outputPorts
}
// appendPortRange appends the numeric range [first, last] to ports. The bounds must already
// have been validated as in-range port numbers; an invalid range is a programming error and
// panics.
func appendPortRange(ports []numorstring.Port, first, last int) []numorstring.Port {
	pr, err := numorstring.PortFromRange(uint16(first), uint16(last))
	if err != nil {
		log.WithError(err).Panic("Failed to make port range from ports that should have been pre-validated.")
	}
	ports = append(ports, pr)
	return ports
}
// k8sPortToCalicoFields extracts the Calico protocol and destination-port list from a k8s
// NetworkPolicyPort. A nil port yields all-zero values (no protocol, no ports).
func (c converter) k8sPortToCalicoFields(port *networkingv1.NetworkPolicyPort) (protocol *numorstring.Protocol, dstPorts []numorstring.Port, err error) {
	// If no port info, return zero values for all fields (protocol, dstPorts).
	if port == nil {
		return nil, nil, nil
	}
	// Port information available.
	parsed, perr := c.k8sPortToCalico(*port)
	if perr != nil {
		return nil, parsed, perr
	}
	return c.k8sProtocolToCalico(port.Protocol), parsed, nil
}
// k8sProtocolToCalico maps an optional k8s protocol to the Calico equivalent; nil maps to nil.
func (c converter) k8sProtocolToCalico(protocol *kapiv1.Protocol) *numorstring.Protocol {
	if protocol == nil {
		return nil
	}
	p := numorstring.ProtocolFromString(string(*protocol))
	return &p
}
// k8sPeerToCalicoFields extracts the Calico selector, namespace selector and CIDR lists from a
// k8s NetworkPolicyPeer. A nil peer yields all-zero values. When IPBlock is set, only the
// nets/notNets results are populated; otherwise only the selectors are.
func (c converter) k8sPeerToCalicoFields(peer *networkingv1.NetworkPolicyPeer, ns string) (selector, nsSelector string, nets []string, notNets []string) {
	// If no peer, return zero values for all fields (selector, nets and !nets).
	if peer == nil {
		return
	}
	// Peer information available.
	// Determine the source selector for the rule.
	if peer.IPBlock != nil {
		// Convert the CIDR to include.
		_, ipNet, err := cnet.ParseCIDR(peer.IPBlock.CIDR)
		if err != nil {
			// A bad CIDR leaves all results empty; the error is only logged.
			log.WithField("cidr", peer.IPBlock.CIDR).WithError(err).Error("Failed to parse CIDR")
			return
		}
		nets = []string{ipNet.String()}
		// Convert the CIDRs to exclude.
		for _, exception := range peer.IPBlock.Except {
			_, ipNet, err = cnet.ParseCIDR(exception)
			if err != nil {
				log.WithField("cidr", exception).WithError(err).Error("Failed to parse CIDR")
				return
			}
			notNets = append(notNets, ipNet.String())
		}
		// If IPBlock is set, then PodSelector and NamespaceSelector cannot be.
		return
	}
	// IPBlock is not set to get here.
	// Note that k8sSelectorToCalico() accepts nil values of the selector.
	selector = c.k8sSelectorToCalico(peer.PodSelector, SelectorPod)
	nsSelector = c.k8sSelectorToCalico(peer.NamespaceSelector, SelectorNamespace)
	return
}
// k8sPortToCalico converts a k8s NetworkPolicyPort into a list of Calico ports. The result
// holds at most one entry: a single port, or a "<port>:<endPort>" range when EndPort is set.
func (c converter) k8sPortToCalico(port networkingv1.NetworkPolicyPort) ([]numorstring.Port, error) {
	if port.Port == nil {
		// No ports - return empty list.
		return nil, nil
	}
	spec := port.Port.String()
	if port.EndPort != nil {
		spec = fmt.Sprintf("%s:%d", spec, *port.EndPort)
	}
	parsed, err := numorstring.PortFromString(spec)
	if err != nil {
		return nil, fmt.Errorf("invalid port %+v: %s", spec, err)
	}
	return []numorstring.Port{parsed}, nil
}
// ProfileNameToNamespace extracts the Namespace name from the given Profile name.
func (c converter) ProfileNameToNamespace(profileName string) (string, error) {
	// Profile objects backed by Namespaces have form "kns.<ns_name>"
	if strings.HasPrefix(profileName, NamespaceProfileNamePrefix) {
		return profileName[len(NamespaceProfileNamePrefix):], nil
	}
	// This is not backed by a Kubernetes Namespace.
	return "", fmt.Errorf("Profile %s not backed by a Namespace", profileName)
}
// serviceAccountNameToProfileName creates a profile name of the form
// 'ksa.' + namespace + "." + serviceaccount name. The namespace is part of the name because
// service-account-based profiles must be globally unique; an empty namespace is treated as
// "default".
func serviceAccountNameToProfileName(sa, namespace string) string {
	ns := namespace
	if ns == "" {
		ns = "default"
	}
	return ServiceAccountProfileNamePrefix + ns + "." + sa
}
// ServiceAccountToProfile converts a ServiceAccount to a Calico Profile. The Profile stores
// labels from the ServiceAccount which are inherited by the WorkloadEndpoints within
// the Profile.
func (c converter) ServiceAccountToProfile(sa *kapiv1.ServiceAccount) (*model.KVPair, error) {
	// Generate the labels to apply to the profile, using a special prefix
	// to indicate that these are the labels from the parent Kubernetes ServiceAccount.
	labels := map[string]string{}
	for k, v := range sa.ObjectMeta.Labels {
		labels[ServiceAccountLabelPrefix+k] = v
	}
	// Add a label for the serviceaccount's name. This allows exact namespace matching
	// based on name within the serviceAccountSelector.
	labels[ServiceAccountLabelPrefix+NameLabel] = sa.Name
	name := serviceAccountNameToProfileName(sa.Name, sa.Namespace)
	profile := apiv3.NewProfile()
	profile.ObjectMeta = metav1.ObjectMeta{
		Name:              name,
		CreationTimestamp: sa.CreationTimestamp,
		UID:               sa.UID,
	}
	profile.Spec.LabelsToApply = labels
	// Embed the profile in a KVPair. Only the service-account half of the combined
	// "<nsRev>/<saRev>" revision is populated for service-account-backed profiles.
	kvp := model.KVPair{
		Key: model.ResourceKey{
			Name: name,
			Kind: apiv3.KindProfile,
		},
		Value:    profile,
		Revision: c.JoinProfileRevisions("", sa.ResourceVersion),
	}
	return &kvp, nil
}
// ProfileNameToServiceAccount extracts the namespace and ServiceAccount name from the given
// Profile name.
func (c converter) ProfileNameToServiceAccount(profileName string) (ns, sa string, err error) {
	// Profile objects backed by ServiceAccounts have form "ksa.<namespace>.<sa_name>"
	if !strings.HasPrefix(profileName, ServiceAccountProfileNamePrefix) {
		// This is not backed by a Kubernetes ServiceAccount.
		err = fmt.Errorf("Profile %s not backed by a ServiceAccount", profileName)
		return
	}
	// SplitN with a limit of 3 yields exactly {"ksa", namespace, sa-name}; anything else is
	// malformed.
	names := strings.SplitN(profileName, ".", 3)
	if len(names) != 3 {
		err = fmt.Errorf("Profile %s is not formatted correctly", profileName)
		return
	}
	ns = names[1]
	sa = names[2]
	return
}
// JoinProfileRevisions constructs the revision from the individual namespace and serviceaccount
// revisions. The combined form is "<nsRev>/<saRev>"; SplitProfileRevision reverses it.
// This is conditional on the feature flag for serviceaccount set or not.
func (c converter) JoinProfileRevisions(nsRev, saRev string) string {
	return nsRev + "/" + saRev
}
// SplitProfileRevision extracts the namespace and serviceaccount revisions from the combined
// "<nsRev>/<saRev>" revision returned on the KDD service account based profile. An empty or
// "0" revision yields empty halves; any other shape is an error.
// This is conditional on the feature flag for serviceaccount set or not.
func (c converter) SplitProfileRevision(rev string) (nsRev string, saRev string, err error) {
	if rev == "" || rev == "0" {
		return "", "", nil
	}
	parts := strings.Split(rev, "/")
	if len(parts) != 2 {
		return "", "", fmt.Errorf("ResourceVersion is not valid: %s", rev)
	}
	return parts[0], parts[1], nil
}
// stringsToIPNets parses each of the given IP/CIDR strings into a *cnet.IPNet, failing fast on
// the first string that does not parse.
func stringsToIPNets(ipStrings []string) ([]*cnet.IPNet, error) {
	var nets []*cnet.IPNet
	for _, s := range ipStrings {
		_, n, err := cnet.ParseCIDROrIP(s)
		if err != nil {
			return nil, err
		}
		nets = append(nets, n)
	}
	return nets, nil
}

View File

@@ -0,0 +1,32 @@
// Copyright (c) 2016-2020 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO move the WorkloadEndpoint converters to is own package. Some refactoring of the annotation and label constants
// is necessary to avoid circular imports, which is why this has been deferred.
package conversion
import (
kapiv1 "k8s.io/api/core/v1"
"github.com/projectcalico/calico/libcalico-go/lib/backend/model"
)
// WorkloadEndpointConverter converts Kubernetes Pods to Calico WorkloadEndpoint KVPairs and
// computes the deterministic host-side veth name for a workload.
type WorkloadEndpointConverter interface {
	// VethNameForWorkload returns the host-side veth interface name for the named pod.
	VethNameForWorkload(namespace, podName string) string
	// PodToWorkloadEndpoints converts the pod to one or more WorkloadEndpoint KVPairs.
	PodToWorkloadEndpoints(pod *kapiv1.Pod) ([]*model.KVPair, error)
}
// NewWorkloadEndpointConverter returns the default WorkloadEndpointConverter implementation.
func NewWorkloadEndpointConverter() WorkloadEndpointConverter {
	return &defaultWorkloadEndpointConverter{}
}

View File

@@ -0,0 +1,285 @@
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package conversion
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"os"
"strings"
log "github.com/sirupsen/logrus"
kapiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/api/pkg/lib/numorstring"
libapiv3 "github.com/projectcalico/calico/libcalico-go/lib/apis/v3"
"github.com/projectcalico/calico/libcalico-go/lib/backend/model"
"github.com/projectcalico/calico/libcalico-go/lib/json"
"github.com/projectcalico/calico/libcalico-go/lib/names"
cnet "github.com/projectcalico/calico/libcalico-go/lib/net"
)
// defaultWorkloadEndpointConverter is the stock WorkloadEndpointConverter: it produces a
// single WorkloadEndpoint (endpoint "eth0") per pod.
type defaultWorkloadEndpointConverter struct{}
// VethNameForWorkload returns a deterministic veth name
// for the given Kubernetes workload (WEP) name and namespace.
func (wc defaultWorkloadEndpointConverter) VethNameForWorkload(namespace, podname string) string {
	// A SHA1 is always 20 bytes long, and so is sufficient for generating the
	// veth name and mac addr.
	digest := sha1.Sum([]byte(namespace + "." + podname))
	prefix := "cali"
	if env := os.Getenv("FELIX_INTERFACEPREFIX"); env != "" {
		// Prefix is set - use the first value in the list.
		prefix = strings.Split(env, ",")[0]
	}
	log.WithField("prefix", prefix).Debugf("Using prefix to create a WorkloadEndpoint veth name")
	return prefix + hex.EncodeToString(digest[:])[:11]
}
// PodToWorkloadEndpoints converts the pod to its single default WorkloadEndpoint, wrapped in a
// one-element slice.
func (wc defaultWorkloadEndpointConverter) PodToWorkloadEndpoints(pod *kapiv1.Pod) ([]*model.KVPair, error) {
	kvp, err := wc.podToDefaultWorkloadEndpoint(pod)
	if err != nil {
		return nil, err
	}
	return []*model.KVPair{kvp}, nil
}
// podToDefaultWorkloadEndpoint converts a Pod to a WorkloadEndpoint. It assumes the calling
// code has verified that the provided Pod is valid to convert to a WorkloadEndpoint.
// It requires a Pod's Name and Node Name to be populated. It will
// fail to convert from a Pod to WorkloadEndpoint otherwise.
func (wc defaultWorkloadEndpointConverter) podToDefaultWorkloadEndpoint(pod *kapiv1.Pod) (*model.KVPair, error) {
	log.WithField("pod", pod).Debug("Converting pod to WorkloadEndpoint")
	// Get all the profiles that apply
	var profiles []string
	// Pull out the Namespace based profile off the pod name and Namespace.
	profiles = append(profiles, NamespaceProfileNamePrefix+pod.Namespace)
	// Pull out the Serviceaccount based profile off the pod SA and namespace
	if pod.Spec.ServiceAccountName != "" {
		profiles = append(profiles, serviceAccountNameToProfileName(pod.Spec.ServiceAccountName, pod.Namespace))
	}
	// Build the canonical WorkloadEndpoint name from the node/pod identifiers.
	wepids := names.WorkloadEndpointIdentifiers{
		Node:         pod.Spec.NodeName,
		Orchestrator: apiv3.OrchestratorKubernetes,
		Endpoint:     "eth0",
		Pod:          pod.Name,
	}
	wepName, err := wepids.CalculateWorkloadEndpointName(false)
	if err != nil {
		return nil, err
	}
	podIPNets, err := getPodIPs(pod)
	if err != nil {
		// IP address was present but malformed in some way, handle as an explicit failure.
		return nil, err
	}
	if IsFinished(pod) {
		// Pod is finished but not yet deleted. In this state the IP will have been freed and returned to the pool
		// so we need to make sure we don't let the caller believe it still belongs to this endpoint.
		// Pods with no IPs will get filtered out before they get to Felix in the watcher syncer cache layer.
		// We can't pretend the workload endpoint is deleted _here_ because that would confuse users of the
		// native v3 Watch() API.
		log.Debug("Pod is in a 'finished' state so no longer owns its IP(s).")
		podIPNets = nil
	}
	// Render the IP networks as strings for the WorkloadEndpoint spec.
	ipNets := []string{}
	for _, ipNet := range podIPNets {
		ipNets = append(ipNets, ipNet.String())
	}
	// Generate the interface name based on workload. This must match
	// the host-side veth configured by the CNI plugin.
	interfaceName := wc.VethNameForWorkload(pod.Namespace, pod.Name)
	// Build the labels map. Start with the pod labels, and append two additional labels for
	// namespace and orchestrator matches.
	// NOTE(review): when pod.Labels is non-nil, the writes below go into the pod's own label
	// map rather than a copy, mutating the caller's Pod object — confirm callers always pass
	// an object they own (e.g. a DeepCopy) before relying on the input being unchanged.
	labels := pod.Labels
	if labels == nil {
		labels = make(map[string]string, 2)
	}
	labels[apiv3.LabelNamespace] = pod.Namespace
	labels[apiv3.LabelOrchestrator] = apiv3.OrchestratorKubernetes
	if pod.Spec.ServiceAccountName != "" && len(pod.Spec.ServiceAccountName) < 63 {
		// For backwards compatibility, include the label if less than 63 characters.
		labels[apiv3.LabelServiceAccount] = pod.Spec.ServiceAccountName
	}
	// Pull out floating IP annotation
	var floatingIPs []libapiv3.IPNAT
	if annotation, ok := pod.Annotations["cni.projectcalico.org/floatingIPs"]; ok && len(podIPNets) > 0 {
		// Parse Annotation data
		var ips []string
		err := json.Unmarshal([]byte(annotation), &ips)
		if err != nil {
			return nil, fmt.Errorf("failed to parse '%s' as JSON: %s", annotation, err)
		}
		// Get IPv4 and IPv6 targets for NAT; each pod IP must be a single-host address
		// (/32 or /128) to be a valid NAT target.
		var podnetV4, podnetV6 *cnet.IPNet
		for _, ipNet := range podIPNets {
			if ipNet.IP.To4() != nil {
				podnetV4 = ipNet
				netmask, _ := podnetV4.Mask.Size()
				if netmask != 32 {
					return nil, fmt.Errorf("PodIP %v is not a valid IPv4: Mask size is %d, not 32", ipNet, netmask)
				}
			} else {
				podnetV6 = ipNet
				netmask, _ := podnetV6.Mask.Size()
				if netmask != 128 {
					return nil, fmt.Errorf("PodIP %v is not a valid IPv6: Mask size is %d, not 128", ipNet, netmask)
				}
			}
		}
		// Pair each floating IP with the pod address of the matching IP family; floating IPs
		// of a family the pod has no address for are silently skipped.
		for _, ip := range ips {
			if strings.Contains(ip, ":") {
				if podnetV6 != nil {
					floatingIPs = append(floatingIPs, libapiv3.IPNAT{
						InternalIP: podnetV6.IP.String(),
						ExternalIP: ip,
					})
				}
			} else {
				if podnetV4 != nil {
					floatingIPs = append(floatingIPs, libapiv3.IPNAT{
						InternalIP: podnetV4.IP.String(),
						ExternalIP: ip,
					})
				}
			}
		}
	}
	// Handle source IP spoofing annotation
	var sourcePrefixes []string
	if annotation, ok := pod.Annotations["cni.projectcalico.org/allowedSourcePrefixes"]; ok && annotation != "" {
		// Parse Annotation data
		var requestedSourcePrefixes []string
		err := json.Unmarshal([]byte(annotation), &requestedSourcePrefixes)
		if err != nil {
			return nil, fmt.Errorf("failed to parse '%s' as JSON: %s", annotation, err)
		}
		// Filter out any invalid entries and normalize the CIDRs.
		for _, prefix := range requestedSourcePrefixes {
			if _, n, err := cnet.ParseCIDR(prefix); err != nil {
				return nil, fmt.Errorf("failed to parse '%s' as a CIDR: %s", prefix, err)
			} else {
				sourcePrefixes = append(sourcePrefixes, n.String())
			}
		}
	}
	// Map any named ports through. Only container ports that carry a name or a host port are
	// relevant to the data model.
	var endpointPorts []libapiv3.WorkloadEndpointPort
	for _, container := range pod.Spec.Containers {
		for _, containerPort := range container.Ports {
			if containerPort.ContainerPort != 0 && (containerPort.HostPort != 0 || containerPort.Name != "") {
				var modelProto numorstring.Protocol
				switch containerPort.Protocol {
				case kapiv1.ProtocolUDP:
					modelProto = numorstring.ProtocolFromString("udp")
				case kapiv1.ProtocolSCTP:
					modelProto = numorstring.ProtocolFromString("sctp")
				case kapiv1.ProtocolTCP, kapiv1.Protocol("") /* K8s default is TCP. */ :
					modelProto = numorstring.ProtocolFromString("tcp")
				default:
					log.WithFields(log.Fields{
						"protocol": containerPort.Protocol,
						"pod":      pod,
						"port":     containerPort,
					}).Debug("Ignoring named port with unknown protocol")
					continue
				}
				endpointPorts = append(endpointPorts, libapiv3.WorkloadEndpointPort{
					Name:     containerPort.Name,
					Protocol: modelProto,
					Port:     uint16(containerPort.ContainerPort),
					HostPort: uint16(containerPort.HostPort),
					HostIP:   containerPort.HostIP,
				})
			}
		}
	}
	// Get the container ID if present. This is used in the CNI plugin to distinguish different pods that have
	// the same name. For example, restarted stateful set pods.
	containerID := pod.Annotations[AnnotationContainerID]
	// Create the workload endpoint.
	wep := libapiv3.NewWorkloadEndpoint()
	wep.ObjectMeta = metav1.ObjectMeta{
		Name:              wepName,
		Namespace:         pod.Namespace,
		CreationTimestamp: pod.CreationTimestamp,
		UID:               pod.UID,
		Labels:            labels,
		GenerateName:      pod.GenerateName,
	}
	wep.Spec = libapiv3.WorkloadEndpointSpec{
		Orchestrator:               "k8s",
		Node:                       pod.Spec.NodeName,
		Pod:                        pod.Name,
		ContainerID:                containerID,
		Endpoint:                   "eth0",
		InterfaceName:              interfaceName,
		Profiles:                   profiles,
		IPNetworks:                 ipNets,
		Ports:                      endpointPorts,
		IPNATs:                     floatingIPs,
		ServiceAccountName:         pod.Spec.ServiceAccountName,
		AllowSpoofedSourcePrefixes: sourcePrefixes,
	}
	// Propagate the Multus network-status annotation, if present, onto the WorkloadEndpoint.
	if v, ok := pod.Annotations["k8s.v1.cni.cncf.io/network-status"]; ok {
		if wep.Annotations == nil {
			wep.Annotations = make(map[string]string)
		}
		wep.Annotations["k8s.v1.cni.cncf.io/network-status"] = v
	}
	// Embed the workload endpoint into a KVPair.
	kvp := model.KVPair{
		Key: model.ResourceKey{
			Name:      wepName,
			Namespace: pod.Namespace,
			Kind:      libapiv3.KindWorkloadEndpoint,
		},
		Value:    wep,
		Revision: pod.ResourceVersion,
	}
	return &kvp, nil
}

View File

@@ -0,0 +1,58 @@
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"reflect"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
)
// typeBGPNode is the reflect.Type returned for BGPNode values.
var typeBGPNode = reflect.TypeOf(BGPNode{})

// BGPNodeKey identifies the per-host BGP subtree for a single node.
type BGPNodeKey struct {
	Host string
}

// defaultPath returns the etcd v1 path for this node's BGP subtree;
// the host name is required.
func (key BGPNodeKey) defaultPath() (string, error) {
	if key.Host == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "host"}
	}
	return "/calico/bgp/v1/host/" + key.Host, nil
}

// defaultDeletePath is the same path as defaultPath.
func (key BGPNodeKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths: no parent paths need cleaning up.
func (key BGPNodeKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the type of value stored at this key.
func (key BGPNodeKey) valueType() (reflect.Type, error) {
	return typeBGPNode, nil
}

// String returns a human-readable form of the key.
func (key BGPNodeKey) String() string {
	return fmt.Sprintf("BGPNodeKey(host=%s)", key.Host)
}

// BGPNode is the (empty) value stored at a BGPNodeKey.
type BGPNode struct {
}

View File

@@ -0,0 +1,162 @@
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"reflect"
"regexp"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
)
var (
	matchGlobalBGPConfig = regexp.MustCompile("^/?calico/bgp/v1/global/(.+)$")
	matchNodeBGPConfig   = regexp.MustCompile("^/?calico/bgp/v1/host/([^/]+)/(.+)$")
	typeGlobalBGPConfig  = rawStringType
	typeNodeBGPConfig    = rawStringType
)

// GlobalBGPConfigKey identifies a single cluster-wide BGP config entry.
type GlobalBGPConfigKey struct {
	// The name of the global BGP config key.
	Name string `json:"-" validate:"required,name"`
}

// defaultPath is the same path used for deletion.
func (key GlobalBGPConfigKey) defaultPath() (string, error) {
	return key.defaultDeletePath()
}

// defaultDeletePath requires a non-empty entry name.
func (key GlobalBGPConfigKey) defaultDeletePath() (string, error) {
	if key.Name == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
	}
	return "/calico/bgp/v1/global/" + key.Name, nil
}

// defaultDeleteParentPaths: no parent paths need cleaning up.
func (key GlobalBGPConfigKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType: global BGP config entries are stored as raw strings.
func (key GlobalBGPConfigKey) valueType() (reflect.Type, error) {
	return typeGlobalBGPConfig, nil
}

// String returns a human-readable form of the key.
func (key GlobalBGPConfigKey) String() string {
	return fmt.Sprintf("GlobalBGPConfig(name=%s)", key.Name)
}
// GlobalBGPConfigListOptions selects global BGP config entries for a
// List operation; an empty Name enumerates all entries.
type GlobalBGPConfigListOptions struct {
	Name string
}

// defaultPathRoot returns the etcd prefix to enumerate, narrowed to a
// single entry when Name is set.
func (options GlobalBGPConfigListOptions) defaultPathRoot() string {
	root := "/calico/bgp/v1/global"
	if options.Name != "" {
		root += "/" + options.Name
	}
	return root
}
// KeyFromDefaultPath converts an etcd path under the global BGP config
// prefix back into a GlobalBGPConfigKey. It returns nil when the path
// does not match the expected pattern or does not satisfy the Name
// filter in the list options.
//
// Fix: the debug log previously said "GlobalFelixConfig" (copy-paste
// from the Felix config parser) even though this parses BGP config keys.
func (options GlobalBGPConfigListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get GlobalBGPConfig key from %s", path)
	r := matchGlobalBGPConfig.FindAllStringSubmatch(path, -1)
	if len(r) != 1 {
		log.Debugf("Didn't match regex")
		return nil
	}
	name := r[0][1]
	if options.Name != "" && name != options.Name {
		log.Debugf("Didn't match name %s != %s", options.Name, name)
		return nil
	}
	return GlobalBGPConfigKey{Name: name}
}
// NodeBGPConfigKey identifies a BGP config entry scoped to one node.
type NodeBGPConfigKey struct {
	// The hostname for the host specific BGP config
	Nodename string `json:"-" validate:"required,name"`
	// The name of the host specific BGP config key.
	Name string `json:"-" validate:"required,name"`
}

// defaultPath is the same path used for deletion.
func (key NodeBGPConfigKey) defaultPath() (string, error) {
	return key.defaultDeletePath()
}

// defaultDeletePath requires both the node name and the entry name.
func (key NodeBGPConfigKey) defaultDeletePath() (string, error) {
	switch {
	case key.Nodename == "":
		return "", errors.ErrorInsufficientIdentifiers{Name: "node"}
	case key.Name == "":
		return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
	}
	return fmt.Sprintf("/calico/bgp/v1/host/%s/%s", key.Nodename, key.Name), nil
}

// defaultDeleteParentPaths: no parent paths need cleaning up.
func (key NodeBGPConfigKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType: node BGP config entries are stored as raw strings.
func (key NodeBGPConfigKey) valueType() (reflect.Type, error) {
	return typeNodeBGPConfig, nil
}

// String returns a human-readable form of the key.
func (key NodeBGPConfigKey) String() string {
	return fmt.Sprintf("HostBGPConfig(node=%s; name=%s)", key.Nodename, key.Name)
}
// NodeBGPConfigListOptions selects node-scoped BGP config entries for a
// List operation; empty fields widen the enumeration.
type NodeBGPConfigListOptions struct {
	Nodename string
	Name     string
}

// defaultPathRoot returns the etcd prefix to enumerate: all hosts, one
// host, or a single named entry, depending on which options are set.
//
// Fix: the previous implementation seeded the root with the literal
// "/calico/bgp/v1/host/%s" — an unexpanded format verb — so every
// returned prefix contained a stray "%s" segment and could never match
// the paths produced by NodeBGPConfigKey.defaultPath (which uses
// "/calico/bgp/v1/host/<node>/<name>").
func (options NodeBGPConfigListOptions) defaultPathRoot() string {
	k := "/calico/bgp/v1/host"
	if options.Nodename == "" {
		return k
	}
	k = k + fmt.Sprintf("/%s", options.Nodename)
	if options.Name == "" {
		return k
	}
	k = k + fmt.Sprintf("/%s", options.Name)
	return k
}
// KeyFromDefaultPath parses a node-scoped BGP config path back into its
// key, returning nil when the path doesn't match the expected pattern or
// is filtered out by the list options.
func (options NodeBGPConfigListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get HostConfig key from %s", path)
	matches := matchNodeBGPConfig.FindAllStringSubmatch(path, -1)
	if len(matches) != 1 {
		log.Debugf("Didn't match regex")
		return nil
	}
	nodename, name := matches[0][1], matches[0][2]
	if options.Nodename != "" && nodename != options.Nodename {
		log.Debugf("Didn't match nodename %s != %s", options.Nodename, nodename)
		return nil
	}
	if options.Name != "" && name != options.Name {
		log.Debugf("Didn't match name %s != %s", options.Name, name)
		return nil
	}
	return NodeBGPConfigKey{Nodename: nodename, Name: name}
}

View File

@@ -0,0 +1,230 @@
// Copyright (c) 2020 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/api/pkg/lib/numorstring"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
"github.com/projectcalico/calico/libcalico-go/lib/net"
)
var (
	matchGlobalBGPPeer = regexp.MustCompile("^/?calico/bgp/v1/global/peer_v./([^/]+)$")
	matchHostBGPPeer   = regexp.MustCompile("^/?calico/bgp/v1/host/([^/]+)/peer_v./([^/]+)$")
	typeBGPPeer        = reflect.TypeOf(BGPPeer{})

	// ipPortSeparator joins an IP and a non-default port into one path segment.
	ipPortSeparator = "-"
	// defaultPort is the standard BGP port; combineIPAndPort omits it from paths.
	defaultPort uint16 = 179
)

// NodeBGPPeerKey identifies a BGP peering scoped to a single node.
type NodeBGPPeerKey struct {
	Nodename string `json:"-" validate:"omitempty"`
	PeerIP   net.IP `json:"-" validate:"required"`
	Port     uint16 `json:"-" validate:"omitempty"`
}

// defaultPath requires both a peer IP and a node name.
func (key NodeBGPPeerKey) defaultPath() (string, error) {
	if key.PeerIP.IP == nil {
		return "", errors.ErrorInsufficientIdentifiers{Name: "peerIP"}
	}
	if key.Nodename == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "node"}
	}
	return fmt.Sprintf("/calico/bgp/v1/host/%s/peer_v%d/%s",
		key.Nodename, key.PeerIP.Version(), combineIPAndPort(key.PeerIP, key.Port)), nil
}

// defaultDeletePath is the same path as defaultPath.
func (key NodeBGPPeerKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths: no parent paths need cleaning up.
func (key NodeBGPPeerKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the type of value stored at this key.
func (key NodeBGPPeerKey) valueType() (reflect.Type, error) {
	return typeBGPPeer, nil
}

// String returns a human-readable form of the key.
func (key NodeBGPPeerKey) String() string {
	return fmt.Sprintf("BGPPeer(node=%s, ip=%s, port=%d)", key.Nodename, key.PeerIP, key.Port)
}
// NodeBGPPeerListOptions selects node-scoped BGP peers for a List.
type NodeBGPPeerListOptions struct {
	Nodename string
	PeerIP   net.IP
	Port     uint16
}

// defaultPathRoot narrows the enumeration prefix as far as the populated
// options allow: all hosts, one host, or one specific peer entry.
func (options NodeBGPPeerListOptions) defaultPathRoot() string {
	if options.Nodename == "" {
		return "/calico/bgp/v1/host"
	}
	if options.PeerIP.IP == nil {
		return fmt.Sprintf("/calico/bgp/v1/host/%s",
			options.Nodename)
	}
	return fmt.Sprintf("/calico/bgp/v1/host/%s/peer_v%d/%s",
		options.Nodename, options.PeerIP.Version(), combineIPAndPort(options.PeerIP, options.Port))
}
// KeyFromDefaultPath converts an etcd path under a host's BGP peer
// prefix back into a NodeBGPPeerKey. It returns nil when the path does
// not match the node-peer pattern, the IP text fails to parse, or the
// result is filtered out by the list options. A "-<port>" suffix on the
// final segment selects a non-default BGP port (see extractIPAndPort).
//
// Fixes: the unmarshal-failure log previously said "GlobalBGPPeer" even
// though this parser handles node-scoped peers; and the final "if
// port == 0" split was dead code, since Port's zero value makes both
// branches construct the same key.
func (options NodeBGPPeerListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get BGPPeer key from %s", path)
	nodename := ""
	var port uint16
	peerIP := net.IP{}
	ekeyb := []byte(path)
	if r := matchHostBGPPeer.FindAllSubmatch(ekeyb, -1); len(r) == 1 {
		var ipBytes []byte
		ipBytes, port = extractIPAndPort(string(r[0][2]))
		nodename = string(r[0][1])
		if err := peerIP.UnmarshalText(ipBytes); err != nil {
			log.WithError(err).WithField("PeerIP", r[0][2]).Error("Error unmarshalling NodeBGPPeer IP address")
			return nil
		}
	} else {
		log.Debugf("%s didn't match regex", path)
		return nil
	}
	if options.PeerIP.IP != nil && !options.PeerIP.Equal(peerIP.IP) {
		log.Debugf("Didn't match peerIP %s != %s", options.PeerIP.String(), peerIP.String())
		return nil
	}
	if options.Nodename != "" && nodename != options.Nodename {
		log.Debugf("Didn't match hostname %s != %s", options.Nodename, nodename)
		return nil
	}
	return NodeBGPPeerKey{PeerIP: peerIP, Nodename: nodename, Port: port}
}
// GlobalBGPPeerKey identifies a cluster-wide BGP peering.
type GlobalBGPPeerKey struct {
	PeerIP net.IP `json:"-" validate:"required"`
	Port   uint16 `json:"-" validate:"omitempty"`
}

// defaultPath requires a peer IP.
func (key GlobalBGPPeerKey) defaultPath() (string, error) {
	if key.PeerIP.IP == nil {
		return "", errors.ErrorInsufficientIdentifiers{Name: "peerIP"}
	}
	return fmt.Sprintf("/calico/bgp/v1/global/peer_v%d/%s",
		key.PeerIP.Version(), combineIPAndPort(key.PeerIP, key.Port)), nil
}

// defaultDeletePath is the same path as defaultPath.
func (key GlobalBGPPeerKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths: no parent paths need cleaning up.
func (key GlobalBGPPeerKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the type of value stored at this key.
func (key GlobalBGPPeerKey) valueType() (reflect.Type, error) {
	return typeBGPPeer, nil
}

// String returns a human-readable form of the key.
func (key GlobalBGPPeerKey) String() string {
	return fmt.Sprintf("BGPPeer(global, ip=%s, port=%d)", key.PeerIP, key.Port)
}
// GlobalBGPPeerListOptions selects cluster-wide BGP peers for a List.
type GlobalBGPPeerListOptions struct {
	PeerIP net.IP
	Port   uint16
}

// defaultPathRoot returns the enumeration prefix: the whole global BGP
// subtree, or a single peer entry when a peer IP is supplied.
func (options GlobalBGPPeerListOptions) defaultPathRoot() string {
	if options.PeerIP.IP == nil {
		return "/calico/bgp/v1/global"
	}
	return fmt.Sprintf("/calico/bgp/v1/global/peer_v%d/%s",
		options.PeerIP.Version(), combineIPAndPort(options.PeerIP, options.Port))
}
// KeyFromDefaultPath converts an etcd path under the global BGP peer
// prefix back into a GlobalBGPPeerKey. It returns nil when the path does
// not match the global-peer pattern, the IP text fails to parse, or the
// peer IP is filtered out by the list options. A "-<port>" suffix on the
// final segment selects a non-default BGP port (see extractIPAndPort).
//
// Fix: the former "if port == 0" branch returned a value identical to
// the fallthrough return, so the conditional was dead code; a single
// return suffices.
func (options GlobalBGPPeerListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get BGPPeer key from %s", path)
	peerIP := net.IP{}
	ekeyb := []byte(path)
	var port uint16
	if r := matchGlobalBGPPeer.FindAllSubmatch(ekeyb, -1); len(r) == 1 {
		var ipBytes []byte
		ipBytes, port = extractIPAndPort(string(r[0][1]))
		if err := peerIP.UnmarshalText(ipBytes); err != nil {
			log.WithError(err).WithField("PeerIP", r[0][1]).Error("Error unmarshalling GlobalBGPPeer IP address")
			return nil
		}
	} else {
		log.Debugf("%s didn't match regex", path)
		return nil
	}
	if options.PeerIP.IP != nil && !options.PeerIP.Equal(peerIP.IP) {
		log.Debugf("Didn't match peerIP %s != %s", options.PeerIP.String(), peerIP.String())
		return nil
	}
	return GlobalBGPPeerKey{PeerIP: peerIP, Port: port}
}
// BGPPeer is the value stored for both global and node-scoped BGP peer
// keys: the peer's address and its AS number.
type BGPPeer struct {
	// PeerIP is the IP address of the BGP peer.
	PeerIP net.IP `json:"ip"`
	// ASNum is the AS number of the peer. Note that we write out the
	// value as a string in to the backend, because confd templating
	// converts large uints to float e notation which breaks the BIRD
	// configuration.
	ASNum numorstring.ASNumber `json:"as_num,string"`
}
// extractIPAndPort splits an "ip-port" backend path segment into the IP
// text and the port. Segments that do not contain exactly one separator,
// or whose port fails to parse, are treated as a bare IP with the
// default BGP port.
func extractIPAndPort(ipPort string) ([]byte, uint16) {
	parts := strings.Split(ipPort, ipPortSeparator)
	if len(parts) != 2 {
		return []byte(ipPort), defaultPort
	}
	port, err := strconv.ParseUint(parts[1], 0, 16)
	if err != nil {
		log.Warningf("Error extracting port. %#v", err)
		return []byte(ipPort), defaultPort
	}
	return []byte(parts[0]), uint16(port)
}
// combineIPAndPort renders an IP (and, when non-zero and non-default,
// its port) as a single backend path segment: "ip" or "ip-port".
func combineIPAndPort(ip net.IP, port uint16) string {
	if port != 0 && port != defaultPort {
		return ip.String() + ipPortSeparator + strconv.Itoa(int(port))
	}
	return ip.String()
}

View File

@@ -0,0 +1,235 @@
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"math/big"
"reflect"
"regexp"
"strings"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
"github.com/projectcalico/calico/libcalico-go/lib/net"
)
const (
	// Common attributes which may be set on allocations by clients.
	IPAMBlockAttributePod             = "pod"
	IPAMBlockAttributeNamespace       = "namespace"
	IPAMBlockAttributeNode            = "node"
	IPAMBlockAttributeType            = "type"
	IPAMBlockAttributeTypeIPIP        = "ipipTunnelAddress"
	IPAMBlockAttributeTypeVXLAN       = "vxlanTunnelAddress"
	IPAMBlockAttributeTypeVXLANV6     = "vxlanV6TunnelAddress"
	IPAMBlockAttributeTypeWireguard   = "wireguardTunnelAddress"
	IPAMBlockAttributeTypeWireguardV6 = "wireguardV6TunnelAddress"
	IPAMBlockAttributeTimestamp       = "timestamp"
)
var (
	// matchBlock recognises block paths of the form
	// /calico/ipam/v2/assignment/ipv{4,6}/block/<cidr-with-dashes>.
	matchBlock = regexp.MustCompile("^/?calico/ipam/v2/assignment/ipv./block/([^/]+)$")
	// typeBlock is the reflect.Type of the value stored for a BlockKey.
	typeBlock = reflect.TypeOf(AllocationBlock{})
)
// BlockKey identifies an IPAM allocation block by its CIDR.
type BlockKey struct {
	CIDR net.IPNet `json:"-" validate:"required,name"`
}

// defaultPath encodes the CIDR into the block's etcd path, replacing the
// "/" in the CIDR with "-" so it forms a single path segment.
func (key BlockKey) defaultPath() (string, error) {
	if key.CIDR.IP == nil {
		return "", errors.ErrorInsufficientIdentifiers{}
	}
	cidrSegment := strings.Replace(key.CIDR.String(), "/", "-", 1)
	return fmt.Sprintf("/calico/ipam/v2/assignment/ipv%d/block/%s", key.CIDR.Version(), cidrSegment), nil
}

// defaultDeletePath is the same path as defaultPath.
func (key BlockKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths: no parent paths need cleaning up.
func (key BlockKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the type of value stored at this key.
func (key BlockKey) valueType() (reflect.Type, error) {
	return typeBlock, nil
}

// String returns a human-readable form of the key.
func (key BlockKey) String() string {
	return fmt.Sprintf("BlockKey(cidr=%s)", key.CIDR.String())
}
// BlockListOptions selects IPAM blocks for a List operation, optionally
// narrowed to a single IP version.
type BlockListOptions struct {
	IPVersion int `json:"-"`
}

// defaultPathRoot returns the enumeration prefix for IPAM blocks.
func (options BlockListOptions) defaultPathRoot() string {
	root := "/calico/ipam/v2/assignment/"
	if options.IPVersion != 0 {
		root += fmt.Sprintf("ipv%d/", options.IPVersion)
	}
	return root
}
// KeyFromDefaultPath parses a block path back into a BlockKey, undoing
// the "-" for "/" substitution in the CIDR segment. It returns nil for
// paths that don't match the block pattern or carry an invalid CIDR.
func (options BlockListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get Block key from %s", path)
	matches := matchBlock.FindAllStringSubmatch(path, -1)
	if len(matches) != 1 {
		log.Debugf("%s didn't match regex", path)
		return nil
	}
	cidrStr := strings.Replace(matches[0][1], "-", "/", 1)
	_, cidr, err := net.ParseCIDR(cidrStr)
	if err != nil {
		log.Debugf("find an invalid cidr %s for path=%v , info=%v ", matches[0][1], path, err)
		return nil
	}
	return BlockKey{CIDR: *cidr}
}
// AllocationBlock is the value stored for a BlockKey: the allocation
// state of one IPAM block, including per-ordinal allocations and their
// associated attributes.
type AllocationBlock struct {
	// The block's CIDR.
	CIDR net.IPNet `json:"cidr"`
	// Affinity of the block, if this block has one. If set, it will be of the form
	// "host:<hostname>". If not set, this block is not affine to a host.
	Affinity *string `json:"affinity"`
	// Array of allocations in-use within this block. nil entries mean the allocation is free.
	// For non-nil entries at index i, the index is the ordinal of the allocation within this block
	// and the value is the index of the associated attributes in the Attributes array.
	Allocations []*int `json:"allocations"`
	// Unallocated is an ordered list of allocations which are free in the block.
	Unallocated []int `json:"unallocated"`
	// Attributes is an array of arbitrary metadata associated with allocations in the block. To find
	// attributes for a given allocation, use the value of the allocation's entry in the Allocations array
	// as the index of the element in this array.
	Attributes []AllocationAttribute `json:"attributes"`
	// We store a sequence number that is updated each time the block is written.
	// Each allocation will also store the sequence number of the block at the time of its creation.
	// When releasing an IP, passing the sequence number associated with the allocation allows us
	// to protect against a race condition and ensure the IP hasn't been released and re-allocated
	// since the release request.
	SequenceNumber uint64 `json:"sequenceNumber"`
	// Map of allocated ordinal within the block to sequence number of the block at
	// the time of allocation. Kubernetes does not allow numerical keys for maps, so
	// the key is cast to a string.
	SequenceNumberForAllocation map[string]uint64 `json:"sequenceNumberForAllocation"`
	// Deleted is an internal boolean used to workaround a limitation in the Kubernetes API whereby
	// deletion will not return a conflict error if the block has been updated.
	Deleted bool `json:"deleted"`
	// HostAffinity is deprecated in favor of Affinity.
	// This is only to keep compatibility with existing deployments.
	// The data format should be `Affinity: host:hostname` (not `hostAffinity: hostname`).
	HostAffinity *string `json:"hostAffinity,omitempty"`
}
// SetSequenceNumberForOrdinal records the block's current sequence
// number against the given allocation ordinal, creating the map lazily.
func (b *AllocationBlock) SetSequenceNumberForOrdinal(ordinal int) {
	if b.SequenceNumberForAllocation == nil {
		b.SequenceNumberForAllocation = map[string]uint64{}
	}
	mapKey := fmt.Sprintf("%d", ordinal)
	b.SequenceNumberForAllocation[mapKey] = b.SequenceNumber
}

// GetSequenceNumberForOrdinal returns the sequence number recorded for
// the given ordinal (zero if none was recorded).
func (b *AllocationBlock) GetSequenceNumberForOrdinal(ordinal int) uint64 {
	mapKey := fmt.Sprintf("%d", ordinal)
	return b.SequenceNumberForAllocation[mapKey]
}

// ClearSequenceNumberForOrdinal removes the record for the given ordinal.
func (b *AllocationBlock) ClearSequenceNumberForOrdinal(ordinal int) {
	mapKey := fmt.Sprintf("%d", ordinal)
	delete(b.SequenceNumberForAllocation, mapKey)
}

// MarkDeleted flags the block as soft-deleted (see the Deleted field).
func (b *AllocationBlock) MarkDeleted() {
	b.Deleted = true
}

// IsDeleted reports whether the block has been marked deleted.
func (b *AllocationBlock) IsDeleted() bool {
	return b.Deleted
}
// Host returns the node name this block is affine to, or "" when the
// block has no "host:"-style affinity.
func (b *AllocationBlock) Host() string {
	const affinityPrefix = "host:"
	if b.Affinity == nil {
		return ""
	}
	if aff := *b.Affinity; strings.HasPrefix(aff, affinityPrefix) {
		return aff[len(affinityPrefix):]
	}
	return ""
}

// Allocation pairs an allocated address with the host it was recorded
// against.
type Allocation struct {
	Addr net.IP
	Host string
}
// NonAffineAllocations returns the in-use allocations in this block that
// are recorded against some host other than the block's affine host (or
// all in-use allocations, if the block has no host affinity).
func (b *AllocationBlock) NonAffineAllocations() []Allocation {
	var allocs []Allocation
	myHost := b.Host()
	for ordinal, attrIdx := range b.Allocations {
		if attrIdx == nil {
			// Unallocated ordinal; nothing to report.
			continue
		}
		if *attrIdx >= len(b.Attributes) {
			log.WithField("block", b).Warnf("Missing attributes for IP with ordinal %d", ordinal)
			continue
		}
		host := b.Attributes[*attrIdx].AttrSecondary[IPAMBlockAttributeNode]
		if myHost != "" && host == myHost {
			// Allocation belongs to the block's own host.
			continue
		}
		allocs = append(allocs, Allocation{
			Addr: b.OrdinalToIP(ordinal),
			Host: host,
		})
	}
	return allocs
}
// NumAddresses returns the number of addresses covered by the block's
// CIDR (2^(size-prefixlen)).
func (b *AllocationBlock) NumAddresses() int {
	ones, size := b.CIDR.Mask.Size()
	return 1 << uint(size-ones)
}

// IPToOrdinal finds the ordinal (i.e. how far into the block) a given IP
// lies. Returns an error if the IP is outside the block.
func (b *AllocationBlock) IPToOrdinal(ip net.IP) (int, error) {
	base := net.IPToBigInt(net.IP{b.CIDR.IP})
	offset := big.NewInt(0).Sub(net.IPToBigInt(ip), base).Int64()
	if offset < 0 || offset >= int64(b.NumAddresses()) {
		return 0, fmt.Errorf("IP %s not in block %s", ip, b.CIDR)
	}
	return int(offset), nil
}
// OrdinalToIP calculates the IP at the given position within the block.
// ord=0 gives the first IP in the block. It is the inverse of
// IPToOrdinal for in-range ordinals.
func (b *AllocationBlock) OrdinalToIP(ord int) net.IP {
	return b.CIDR.NthIP(ord)
}
// AllocationAttribute is the metadata recorded for allocations in a
// block; entries in AllocationBlock.Allocations index into an array of
// these.
type AllocationAttribute struct {
	// AttrPrimary holds the allocation's handle ID, if any (serialized
	// as "handle_id").
	AttrPrimary *string `json:"handle_id"`
	// AttrSecondary holds free-form key/value attributes; see the
	// IPAMBlockAttribute* constants for common keys.
	AttrSecondary map[string]string `json:"secondary"`
}

View File

@@ -0,0 +1,118 @@
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"reflect"
"regexp"
"strings"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
"github.com/projectcalico/calico/libcalico-go/lib/net"
)
var (
	// matchBlockAffinity recognises paths of the form
	// /calico/ipam/v2/host/<host>/ipv{4,6}/block/<cidr-with-dashes>.
	matchBlockAffinity = regexp.MustCompile("^/?calico/ipam/v2/host/([^/]+)/ipv./block/([^/]+)$")
	// typeBlockAff is the reflect.Type of the stored affinity value.
	typeBlockAff = reflect.TypeOf(BlockAffinity{})
)
// BlockAffinityState describes the lifecycle state of a host's claim on
// a block.
type BlockAffinityState string
const (
	StateConfirmed BlockAffinityState = "confirmed"
	StatePending BlockAffinityState = "pending"
	StatePendingDeletion BlockAffinityState = "pendingDeletion"
)
// BlockAffinityKey identifies one host's affinity to one block CIDR.
type BlockAffinityKey struct {
	CIDR net.IPNet `json:"-" validate:"required,name"`
	Host string `json:"-"`
}
// BlockAffinity is the value stored at a BlockAffinityKey.
type BlockAffinity struct {
	// State is the current lifecycle state of the claim.
	State BlockAffinityState `json:"state"`
	// Deleted soft-deletes the affinity record.
	Deleted bool `json:"deleted"`
}
// defaultPath builds the affinity path; both the CIDR and the host are
// required. The "/" in the CIDR is replaced with "-" so it forms a
// single path segment.
func (key BlockAffinityKey) defaultPath() (string, error) {
	if key.CIDR.IP == nil || key.Host == "" {
		return "", errors.ErrorInsufficientIdentifiers{}
	}
	cidrSegment := strings.Replace(key.CIDR.String(), "/", "-", 1)
	return fmt.Sprintf("/calico/ipam/v2/host/%s/ipv%d/block/%s", key.Host, key.CIDR.Version(), cidrSegment), nil
}

// defaultDeletePath is the same path as defaultPath.
func (key BlockAffinityKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths: no parent paths need cleaning up.
func (key BlockAffinityKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the type of value stored at this key.
func (key BlockAffinityKey) valueType() (reflect.Type, error) {
	return typeBlockAff, nil
}

// String returns a human-readable form of the key.
func (key BlockAffinityKey) String() string {
	return fmt.Sprintf("BlockAffinityKey(cidr=%s, host=%s)", key.CIDR, key.Host)
}
// BlockAffinityListOptions selects block affinities for a List.
type BlockAffinityListOptions struct {
	Host      string
	IPVersion int
}

// defaultPathRoot narrows the enumeration prefix by host and, when a
// host is given, by IP version as well (the version alone cannot narrow
// the path because it appears after the host segment).
func (options BlockAffinityListOptions) defaultPathRoot() string {
	root := "/calico/ipam/v2/host/"
	if options.Host == "" {
		return root
	}
	root += options.Host
	if options.IPVersion != 0 {
		root += fmt.Sprintf("/ipv%d/block", options.IPVersion)
	}
	return root
}
// KeyFromDefaultPath parses a block affinity path back into its key,
// returning nil when the path doesn't match the expected shape, the CIDR
// segment is invalid, or the list options filter the result out.
func (options BlockAffinityListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get Block affinity key from %s", path)
	matches := matchBlockAffinity.FindAllStringSubmatch(path, -1)
	if len(matches) != 1 {
		log.Debugf("%s didn't match regex", path)
		return nil
	}
	host := matches[0][1]
	cidrStr := strings.Replace(matches[0][2], "-", "/", 1)
	_, cidr, _ := net.ParseCIDR(cidrStr)
	if cidr == nil {
		log.Debugf("Failed to parse CIDR in block affinity path: %q", path)
		return nil
	}
	if options.Host != "" && options.Host != host {
		log.Debugf("Didn't match hostname: %s != %s", options.Host, host)
		return nil
	}
	if options.IPVersion != 0 && options.IPVersion != cidr.Version() {
		log.Debugf("Didn't match IP version. %d != %d", options.IPVersion, cidr.Version())
		return nil
	}
	return BlockAffinityKey{CIDR: *cidr, Host: host}
}

View File

@@ -0,0 +1,20 @@
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
// DeletionMarker is implemented by model values that carry a soft-delete
// flag (e.g. AllocationBlock): MarkDeleted sets the flag and IsDeleted
// reports it.
type DeletionMarker interface {
	MarkDeleted()
	IsDeleted() bool
}

View File

@@ -0,0 +1,183 @@
// Copyright (c) 2016-2018 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"reflect"
"regexp"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
)
var (
	matchGlobalConfig = regexp.MustCompile("^/?calico/v1/config/(.+)$")
	matchHostConfig   = regexp.MustCompile("^/?calico/v1/host/([^/]+)/config/(.+)$")
	matchReadyFlag    = regexp.MustCompile("^/calico/v1/Ready$")
	typeGlobalConfig  = rawStringType
	typeHostConfig    = rawStringType
	typeReadyFlag     = rawBoolType
)

// ReadyFlagKey identifies the singleton /calico/v1/Ready flag.
type ReadyFlagKey struct {
}

// defaultPath is the fixed path of the ready flag.
func (key ReadyFlagKey) defaultPath() (string, error) {
	return "/calico/v1/Ready", nil
}

// defaultDeletePath is the same path as defaultPath.
func (key ReadyFlagKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths: no parent paths need cleaning up.
func (key ReadyFlagKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType: the ready flag is stored as a raw boolean.
func (key ReadyFlagKey) valueType() (reflect.Type, error) {
	return typeReadyFlag, nil
}

// String returns a human-readable form of the key.
func (key ReadyFlagKey) String() string {
	return "ReadyFlagKey()"
}
// GlobalConfigKey identifies one cluster-wide Felix config entry.
type GlobalConfigKey struct {
	Name string `json:"-" validate:"required,name"`
}

// defaultPath requires a non-empty entry name.
func (key GlobalConfigKey) defaultPath() (string, error) {
	if key.Name == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
	}
	return "/calico/v1/config/" + key.Name, nil
}

// defaultDeletePath is the same path as defaultPath.
func (key GlobalConfigKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths: no parent paths need cleaning up.
func (key GlobalConfigKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType: global config entries are stored as raw strings.
func (key GlobalConfigKey) valueType() (reflect.Type, error) {
	return typeGlobalConfig, nil
}

// String returns a human-readable form of the key.
func (key GlobalConfigKey) String() string {
	return fmt.Sprintf("GlobalFelixConfig(name=%s)", key.Name)
}
// GlobalConfigListOptions selects global Felix config entries for a
// List operation; an empty Name enumerates all entries.
type GlobalConfigListOptions struct {
	Name string
}

// defaultPathRoot narrows the enumeration prefix to a single entry when
// Name is set.
func (options GlobalConfigListOptions) defaultPathRoot() string {
	root := "/calico/v1/config"
	if options.Name != "" {
		root += "/" + options.Name
	}
	return root
}
// KeyFromDefaultPath parses a global Felix config path back into its
// key, returning nil when the path doesn't match the expected pattern or
// is filtered out by the Name option.
func (options GlobalConfigListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get GlobalConfig key from %s", path)
	matches := matchGlobalConfig.FindAllStringSubmatch(path, -1)
	if len(matches) != 1 {
		log.Debugf("Didn't match regex")
		return nil
	}
	name := matches[0][1]
	if options.Name != "" && name != options.Name {
		log.Debugf("Didn't match name %s != %s", options.Name, name)
		return nil
	}
	return GlobalConfigKey{Name: name}
}
// HostConfigKey identifies a Felix config entry scoped to one node.
type HostConfigKey struct {
	Hostname string `json:"-" validate:"required,name"`
	Name     string `json:"-" validate:"required,name"`
}

// defaultPath requires both the entry name and the node name.
func (key HostConfigKey) defaultPath() (string, error) {
	switch {
	case key.Name == "":
		return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
	case key.Hostname == "":
		return "", errors.ErrorInsufficientIdentifiers{Name: "node"}
	}
	return fmt.Sprintf("/calico/v1/host/%s/config/%s", key.Hostname, key.Name), nil
}

// defaultDeletePath is the same path as defaultPath.
func (key HostConfigKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths: no parent paths need cleaning up.
func (key HostConfigKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType: host config entries are stored as raw strings.
func (key HostConfigKey) valueType() (reflect.Type, error) {
	return typeHostConfig, nil
}

// String returns a human-readable form of the key.
func (key HostConfigKey) String() string {
	return fmt.Sprintf("HostConfig(node=%s,name=%s)", key.Hostname, key.Name)
}
// HostConfigListOptions selects per-host Felix config entries for a
// List operation; empty fields widen the enumeration.
type HostConfigListOptions struct {
	Hostname string
	Name     string
}

// defaultPathRoot narrows the enumeration prefix first by host, then by
// entry name (the name alone cannot narrow the path).
func (options HostConfigListOptions) defaultPathRoot() string {
	root := "/calico/v1/host"
	if options.Hostname == "" {
		return root
	}
	root += fmt.Sprintf("/%s/config", options.Hostname)
	if options.Name != "" {
		root += "/" + options.Name
	}
	return root
}
// KeyFromDefaultPath parses a per-host Felix config path back into its
// key, returning nil when the path doesn't match the expected pattern or
// is filtered out by the list options.
func (options HostConfigListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get HostConfig key from %s", path)
	matches := matchHostConfig.FindAllStringSubmatch(path, -1)
	if len(matches) != 1 {
		log.Debugf("Didn't match regex")
		return nil
	}
	hostname, name := matches[0][1], matches[0][2]
	if options.Hostname != "" && hostname != options.Hostname {
		log.Debugf("Didn't match hostname %s != %s", options.Hostname, hostname)
		return nil
	}
	if options.Name != "" && name != options.Name {
		log.Debugf("Didn't match name %s != %s", options.Name, name)
		return nil
	}
	return HostConfigKey{Hostname: hostname, Name: name}
}

View File

@@ -0,0 +1,113 @@
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"regexp"
"reflect"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
"github.com/projectcalico/calico/libcalico-go/lib/net"
)
var (
	matchHostEndpoint = regexp.MustCompile("^/?calico/v1/host/([^/]+)/endpoint/([^/]+)$")
	typeHostEndpoint  = reflect.TypeOf(HostEndpoint{})
)

// HostEndpointKey identifies a host endpoint by node and endpoint ID.
type HostEndpointKey struct {
	Hostname   string `json:"-" validate:"required,hostname"`
	EndpointID string `json:"-" validate:"required,namespacedName"`
}

// defaultPath requires both identifiers; the endpoint ID is escaped so
// it forms a single path segment.
func (key HostEndpointKey) defaultPath() (string, error) {
	if key.Hostname == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "node"}
	}
	if key.EndpointID == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
	}
	return fmt.Sprintf("/calico/v1/host/%s/endpoint/%s",
		key.Hostname, escapeName(key.EndpointID)), nil
}

// defaultDeletePath is the same path as defaultPath.
func (key HostEndpointKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths: no parent paths need cleaning up.
func (key HostEndpointKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the type of value stored at this key.
func (key HostEndpointKey) valueType() (reflect.Type, error) {
	return typeHostEndpoint, nil
}

// String returns a human-readable form of the key.
func (key HostEndpointKey) String() string {
	return fmt.Sprintf("HostEndpoint(node=%s, name=%s)", key.Hostname, key.EndpointID)
}
// HostEndpointListOptions selects host endpoints for a List operation;
// empty fields widen the enumeration.
type HostEndpointListOptions struct {
	Hostname   string
	EndpointID string
}

// defaultPathRoot narrows the enumeration prefix first by host, then by
// the (escaped) endpoint ID.
func (options HostEndpointListOptions) defaultPathRoot() string {
	root := "/calico/v1/host"
	if options.Hostname == "" {
		return root
	}
	root += fmt.Sprintf("/%s/endpoint", options.Hostname)
	if options.EndpointID != "" {
		root += "/" + escapeName(options.EndpointID)
	}
	return root
}
// KeyFromDefaultPath parses a default path back into a HostEndpointKey.
// Returns nil if the path does not match the host endpoint pattern, or if it
// matches but conflicts with a hostname/endpoint ID filter set on the options.
func (options HostEndpointListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get HostEndpoint key from %s", path)
	r := matchHostEndpoint.FindAllStringSubmatch(path, -1)
	if len(r) != 1 {
		log.Debugf("Didn't match regex")
		return nil
	}
	hostname := r[0][1]
	// The endpoint ID was escaped when the path was built; reverse that.
	endpointID := unescapeName(r[0][2])
	if options.Hostname != "" && hostname != options.Hostname {
		log.Debugf("Didn't match hostname %s != %s", options.Hostname, hostname)
		return nil
	}
	if options.EndpointID != "" && endpointID != options.EndpointID {
		log.Debugf("Didn't match endpointID %s != %s", options.EndpointID, endpointID)
		return nil
	}
	return HostEndpointKey{Hostname: hostname, EndpointID: endpointID}
}

// HostEndpoint is the v1 data-model value stored for a host endpoint.
type HostEndpoint struct {
	Name              string            `json:"name,omitempty" validate:"omitempty,interface"`
	ExpectedIPv4Addrs []net.IP          `json:"expected_ipv4_addrs,omitempty" validate:"omitempty,dive,ipv4"`
	ExpectedIPv6Addrs []net.IP          `json:"expected_ipv6_addrs,omitempty" validate:"omitempty,dive,ipv6"`
	Labels            map[string]string `json:"labels,omitempty" validate:"omitempty,labels"`
	ProfileIDs        []string          `json:"profile_ids,omitempty" validate:"omitempty,dive,name"`
	Ports             []EndpointPort    `json:"ports,omitempty" validate:"dive"`
}

View File

@@ -0,0 +1,107 @@
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"regexp"
"reflect"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
)
var (
	// matchHostEndpointStatus parses Felix host endpoint status paths; the
	// leading "/" is optional.
	matchHostEndpointStatus = regexp.MustCompile("^/?calico/felix/v1/host/([^/]+)/endpoint/([^/]+)$")
	typeHostEndpointStatus  = reflect.TypeOf(HostEndpointStatus{})
)

// HostEndpointStatusKey identifies the Felix-reported status entry for a
// single host endpoint.
type HostEndpointStatusKey struct {
	Hostname   string `json:"-" validate:"required,hostname"`
	EndpointID string `json:"-" validate:"required,namespacedName"`
}

// defaultPath returns the datastore path for this status entry; both
// identifiers must be non-empty.
func (key HostEndpointStatusKey) defaultPath() (string, error) {
	if key.Hostname == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "node"}
	}
	if key.EndpointID == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
	}
	// Escape the endpoint ID since it may contain "/" or "%".
	e := fmt.Sprintf("/calico/felix/v1/host/%s/endpoint/%s",
		key.Hostname, escapeName(key.EndpointID))
	return e, nil
}

// defaultDeletePath is identical to defaultPath for this leaf entry.
func (key HostEndpointStatusKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths returns nil: no parent directories are cleaned up
// when this key is deleted.
func (key HostEndpointStatusKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the Go type stored at this key (HostEndpointStatus).
func (key HostEndpointStatusKey) valueType() (reflect.Type, error) {
	return typeHostEndpointStatus, nil
}

// String returns a human-readable form of the key for logging.
func (key HostEndpointStatusKey) String() string {
	return fmt.Sprintf("HostEndpointStatus(hostname=%s, name=%s)", key.Hostname, key.EndpointID)
}
// HostEndpointStatusListOptions selects HostEndpointStatus entries, optionally
// filtered by hostname and/or endpoint ID.
type HostEndpointStatusListOptions struct {
	Hostname   string
	EndpointID string
}

// defaultPathRoot returns the longest common path prefix implied by the
// options.
func (options HostEndpointStatusListOptions) defaultPathRoot() string {
	switch {
	case options.Hostname == "":
		return "/calico/felix/v1/host"
	case options.EndpointID == "":
		return fmt.Sprintf("/calico/felix/v1/host/%s/endpoint", options.Hostname)
	default:
		return fmt.Sprintf("/calico/felix/v1/host/%s/endpoint/%s",
			options.Hostname, escapeName(options.EndpointID))
	}
}
// KeyFromDefaultPath parses a default path into a HostEndpointStatusKey.
// Returns nil if the path does not match the status pattern, or if it matches
// but conflicts with a hostname/endpoint ID filter set on the options.
func (options HostEndpointStatusListOptions) KeyFromDefaultPath(ekey string) Key {
	log.Debugf("Get HostEndpointStatus key from %s", ekey)
	r := matchHostEndpointStatus.FindAllStringSubmatch(ekey, -1)
	if len(r) != 1 {
		log.Debugf("Didn't match regex")
		return nil
	}
	hostname := r[0][1]
	// The endpoint ID was escaped when the path was built; reverse that.
	endpointID := unescapeName(r[0][2])
	if options.Hostname != "" && hostname != options.Hostname {
		log.Debugf("Didn't match hostname %s != %s", options.Hostname, hostname)
		return nil
	}
	if options.EndpointID != "" && endpointID != options.EndpointID {
		log.Debugf("Didn't match endpointID %s != %s", options.EndpointID, endpointID)
		return nil
	}
	return HostEndpointStatusKey{Hostname: hostname, EndpointID: endpointID}
}

// HostEndpointStatus is the status value stored for a host endpoint.
type HostEndpointStatus struct {
	Status string `json:"status"`
}

View File

@@ -0,0 +1,53 @@
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"reflect"
)
const (
	// IPAMConfigGlobalName is the name of the singleton IPAM config resource.
	IPAMConfigGlobalName = "default"
)

var typeIPAMConfig = reflect.TypeOf(IPAMConfig{})

// IPAMConfigKey is the key of the single, global IPAM configuration entry;
// it carries no identifying fields.
type IPAMConfigKey struct{}

// defaultPath returns the fixed datastore path of the IPAM configuration.
func (key IPAMConfigKey) defaultPath() (string, error) {
	return "/calico/ipam/v2/config", nil
}

// defaultDeletePath returns the same fixed path as defaultPath.
func (key IPAMConfigKey) defaultDeletePath() (string, error) {
	return "/calico/ipam/v2/config", nil
}

// defaultDeleteParentPaths returns nil: no parent directories are cleaned up
// when this key is deleted.
func (key IPAMConfigKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the Go type stored at this key (IPAMConfig).
func (key IPAMConfigKey) valueType() (reflect.Type, error) {
	return typeIPAMConfig, nil
}

// String returns a fixed human-readable name for logging.
func (key IPAMConfigKey) String() string {
	return "IPAMConfigKey()"
}

// IPAMConfig holds the cluster-wide IPAM settings.
type IPAMConfig struct {
	StrictAffinity     bool `json:"strict_affinity,omitempty"`
	AutoAllocateBlocks bool `json:"auto_allocate_blocks,omitempty"`
	MaxBlocksPerHost   int  `json:"maxBlocksPerHost,omitempty"`
}

View File

@@ -0,0 +1,84 @@
// Copyright (c) 2016,2020 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"reflect"
"regexp"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
)
var (
	// matchHandle parses IPAM handle paths; the leading "/" is optional.
	matchHandle = regexp.MustCompile("^/?calico/ipam/v2/handle/([^/]+)$")
	typeHandle  = reflect.TypeOf(IPAMHandle{})
)

// IPAMHandleKey identifies an IPAM handle by its ID.
type IPAMHandleKey struct {
	HandleID string `json:"id"`
}

// defaultPath returns the datastore path for this handle; the ID must be
// non-empty.  Note: unlike endpoint names, the handle ID is not escaped here.
func (key IPAMHandleKey) defaultPath() (string, error) {
	if key.HandleID == "" {
		return "", errors.ErrorInsufficientIdentifiers{}
	}
	e := fmt.Sprintf("/calico/ipam/v2/handle/%s", key.HandleID)
	return e, nil
}

// defaultDeletePath is identical to defaultPath for this leaf entry.
func (key IPAMHandleKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths returns nil: no parent directories are cleaned up
// when this key is deleted.
func (key IPAMHandleKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the Go type stored at this key (IPAMHandle).
func (key IPAMHandleKey) valueType() (reflect.Type, error) {
	return typeHandle, nil
}

// String returns a human-readable form of the key for logging.
func (key IPAMHandleKey) String() string {
	return fmt.Sprintf("IPAMHandleKey(id=%s)", key.HandleID)
}

// IPAMHandleListOptions selects IPAM handles; no filters are currently
// supported, so listing always returns every handle.
type IPAMHandleListOptions struct {
	// TODO: Have some options here?
}

// defaultPathRoot returns the directory that contains all IPAM handles.
func (options IPAMHandleListOptions) defaultPathRoot() string {
	k := "/calico/ipam/v2/handle/"
	// TODO: Allow filtering on individual host?
	return k
}
// KeyFromDefaultPath converts a default path into an IPAMHandleKey, returning
// nil when the path is not a handle path.
func (options IPAMHandleListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get IPAM handle key from %s", path)
	matches := matchHandle.FindAllStringSubmatch(path, -1)
	if len(matches) == 1 {
		return IPAMHandleKey{HandleID: matches[0][1]}
	}
	log.Debugf("%s didn't match regex", path)
	return nil
}
// IPAMHandle is the value stored for an IPAM handle.
type IPAMHandle struct {
	// HandleID mirrors the key and is not serialized.
	HandleID string `json:"-"`
	// Block maps a block identifier to an integer per block.
	// NOTE(review): the int presumably counts allocations made under this
	// handle in that block — confirm against the IPAM allocator.
	Block   map[string]int `json:"block"`
	Deleted bool           `json:"deleted"`
}

View File

@@ -0,0 +1,58 @@
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"reflect"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
)
var (
	typeIPAMHost = reflect.TypeOf(IPAMHost{})
)

// IPAMHostKey identifies the per-host IPAM entry for a single host.
type IPAMHostKey struct {
	Host string
}

// defaultPath returns the per-host IPAM path; Host must be non-empty.
func (key IPAMHostKey) defaultPath() (string, error) {
	if key.Host == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "host"}
	}
	k := "/calico/ipam/v2/host/" + key.Host
	return k, nil
}

// defaultDeletePath is identical to defaultPath.
func (key IPAMHostKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths returns nil: no parent directories are cleaned up
// when this key is deleted.
func (key IPAMHostKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the Go type stored at this key (IPAMHost).
func (key IPAMHostKey) valueType() (reflect.Type, error) {
	return typeIPAMHost, nil
}

// String returns a human-readable form of the key for logging.
func (key IPAMHostKey) String() string {
	return fmt.Sprintf("IPAMHostKey(host=%s)", key.Host)
}

// IPAMHost carries no data; the key's presence in the datastore is the
// meaningful part.
type IPAMHost struct {
}

View File

@@ -0,0 +1,107 @@
// Copyright (c) 2016,2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"reflect"
"regexp"
"strings"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/backend/encap"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
"github.com/projectcalico/calico/libcalico-go/lib/net"
)
var (
	// matchIPPool parses v1 pool paths.  The "v." segment matches the IP
	// version (the "." is a single-character wildcard); the leading "/" is
	// optional.
	matchIPPool = regexp.MustCompile("^/?calico/v1/ipam/v./pool/([^/]+)$")
	typeIPPool  = reflect.TypeOf(IPPool{})
)

// IPPoolKey identifies an IP pool by its CIDR.
type IPPoolKey struct {
	CIDR net.IPNet `json:"-" validate:"required,name"`
}

// defaultPath returns the datastore path for this pool.  The "/" in the CIDR
// is stored as "-" so the CIDR fits in a single path segment.
func (key IPPoolKey) defaultPath() (string, error) {
	if key.CIDR.IP == nil {
		return "", errors.ErrorInsufficientIdentifiers{Name: "cidr"}
	}
	c := strings.Replace(key.CIDR.String(), "/", "-", 1)
	e := fmt.Sprintf("/calico/v1/ipam/v%d/pool/%s", key.CIDR.Version(), c)
	return e, nil
}

// defaultDeletePath is identical to defaultPath for this leaf entry.
func (key IPPoolKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths returns nil: no parent directories are cleaned up
// when this key is deleted.
func (key IPPoolKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the Go type stored at this key (IPPool).
func (key IPPoolKey) valueType() (reflect.Type, error) {
	return typeIPPool, nil
}

// String returns a human-readable form of the key for logging.
func (key IPPoolKey) String() string {
	return fmt.Sprintf("IPPool(cidr=%s)", key.CIDR)
}
// IPPoolListOptions selects IP pools, optionally filtered by CIDR.
type IPPoolListOptions struct {
	CIDR net.IPNet
}

// defaultPathRoot returns the common path prefix of the pools selected by
// these options: the whole IPAM directory when no CIDR filter is set, or the
// fully qualified pool path otherwise.
func (options IPPoolListOptions) defaultPathRoot() string {
	k := "/calico/v1/ipam/"
	if options.CIDR.IP == nil {
		return k
	}
	// The "/" in the CIDR is stored as "-" so it fits in one path segment
	// (e.g. 10.0.0.0/16 -> 10.0.0.0-16).
	c := strings.Replace(options.CIDR.String(), "/", "-", 1)
	// Single Sprintf; the original's trailing fmt.Sprintf("%s", c) was a
	// redundant no-op formatting of a string (staticcheck S1025).
	return k + fmt.Sprintf("v%d/pool/%s", options.CIDR.Version(), c)
}
// KeyFromDefaultPath parses a default path into an IPPoolKey.  Returns nil if
// the path is not a pool path, the munged CIDR fails to parse, or the CIDR
// conflicts with a filter set on the options.
func (options IPPoolListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get Pool key from %s", path)
	r := matchIPPool.FindAllStringSubmatch(path, -1)
	if len(r) != 1 {
		log.Debugf("%s didn't match regex", path)
		return nil
	}
	// Reverse the "/"->"-" munging applied when the pool path was built.
	cidrStr := strings.Replace(r[0][1], "-", "/", 1)
	_, cidr, err := net.ParseCIDR(cidrStr)
	if err != nil {
		log.WithError(err).Warningf("Failed to parse CIDR %s", cidrStr)
		return nil
	}
	if options.CIDR.IP != nil && !reflect.DeepEqual(*cidr, options.CIDR) {
		log.Debugf("Didn't match cidr %s != %s", options.CIDR.String(), cidr.String())
		return nil
	}
	return IPPoolKey{CIDR: *cidr}
}

// IPPool is the v1 data-model value stored for an IP pool.
type IPPool struct {
	CIDR             net.IPNet  `json:"cidr"`
	IPIPInterface    string     `json:"ipip"`
	IPIPMode         encap.Mode `json:"ipip_mode"`
	VXLANMode        encap.Mode `json:"vxlan_mode"`
	Masquerade       bool       `json:"masquerade"`
	IPAM             bool       `json:"ipam"`
	Disabled         bool       `json:"disabled"`
	DisableBGPExport bool       `json:"disableBGPExport"`
}

View File

@@ -0,0 +1,627 @@
// Copyright (c) 2016-2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"bytes"
"fmt"
net2 "net"
"reflect"
"strings"
"time"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/types"
"github.com/projectcalico/calico/libcalico-go/lib/json"
"github.com/projectcalico/calico/libcalico-go/lib/namespace"
"github.com/projectcalico/calico/libcalico-go/lib/net"
)
// RawString is used as a value type to indicate that the value is a bare
// non-JSON string.
type rawString string
type rawBool bool
type rawIP net.IP

var rawStringType = reflect.TypeOf(rawString(""))
var rawBoolType = reflect.TypeOf(rawBool(true))
var rawIPType = reflect.TypeOf(rawIP{})

// Key represents a parsed datastore key.
type Key interface {
	// defaultPath() returns a common path representation of the object used by
	// etcdv3 and other datastores.
	defaultPath() (string, error)
	// defaultDeletePath() returns a common path representation used by etcdv3
	// and other datastores to delete the object.
	defaultDeletePath() (string, error)
	// defaultDeleteParentPaths() returns an ordered slice of paths that should
	// be removed after deleting the primary path (given by defaultDeletePath),
	// provided there are no child entries associated with those paths. This is
	// only used by directory based KV stores (such as etcdv3). With a directory
	// based KV store, creation of a resource may also create parent directory entries
	// that could be shared by multiple resources, and therefore the parent directories
	// can only be removed when there are no more resources under them. The list of
	// parent paths is ordered, and directories should be removed in the order supplied
	// in the slice and only if the directory is empty.
	defaultDeleteParentPaths() ([]string, error)
	// valueType returns the object type associated with this key.
	valueType() (reflect.Type, error)
	// String returns a unique string representation of this key. The string
	// returned by this method must uniquely identify this Key.
	String() string
}

// ListInterface is used to perform datastore lookups.
type ListInterface interface {
	// defaultPathRoot() returns a default stringified root path, i.e. path
	// to the directory containing all the keys to be listed.
	defaultPathRoot() string
	// BUG(smc) I think we should remove this and use the package KeyFromDefaultPath function.
	// KeyFromDefaultPath parses the default path representation of the
	// Key type for this list. It returns nil if passed a different kind
	// of path.
	KeyFromDefaultPath(key string) Key
}

// KVPair holds a typed key and value object as well as datastore specific
// revision information.
//
// The Value is dependent on the Key, but in general will be one of the
// following types:
// - A pointer to a struct
// - A slice or map
// - A bare string, boolean value or IP address (i.e. without quotes, so not
// JSON format).
type KVPair struct {
	Key      Key
	Value    interface{}
	Revision string
	UID      *types.UID
	TTL      time.Duration // For writes, if non-zero, key has a TTL.
}

// KVPairList hosts a slice of KVPair structs and a Revision, returned from a
// List operation.
type KVPairList struct {
	KVPairs  []*KVPair
	Revision string
}
// KeyToDefaultPath converts one of the Keys from this package into a unique
// '/'-delimited path, which is suitable for use as the key when storing the
// value in a hierarchical (i.e. one with directories and leaves) key/value
// datastore such as etcd v3.
//
// Each unique key returns a unique path.
//
// Keys with a hierarchical relationship share a common prefix.  However, in
// order to support datastores that do not support storing data at non-leaf
// nodes in the hierarchy (such as etcd v3), the path returned for a "parent"
// key, is not a direct ancestor of its children.
func KeyToDefaultPath(key Key) (string, error) {
	return key.defaultPath()
}

// KeyToDefaultDeletePath converts one of the Keys from this package into a
// unique '/'-delimited path, which is suitable for use as the key when
// (recursively) deleting the value from a hierarchical (i.e. one with
// directories and leaves) key/value datastore such as etcd v3.
//
// KeyToDefaultDeletePath returns a different path to KeyToDefaultPath when
// it is a passed a Key that represents a non-leaf which, for example, has its
// own metadata but also contains other resource types as children.
//
// KeyToDefaultDeletePath returns the common prefix of the non-leaf key and
// its children so that a recursive delete of that key would delete the
// object itself and any children it has.
func KeyToDefaultDeletePath(key Key) (string, error) {
	return key.defaultDeletePath()
}

// KeyToDefaultDeleteParentPaths returns a slice of '/'-delimited
// paths which are used to delete parent entries that may be auto-created
// by directory-based KV stores (e.g. etcd v3). These paths should also be
// removed provided they have no more child entries.
//
// The list of parent paths is ordered, and directories should be removed
// in the order supplied in the slice and only if the directory is empty.
//
// For example,
//
// KeyToDefaultDeletePaths(WorkloadEndpointKey{
// Nodename: "h",
// OrchestratorID: "o",
// WorkloadID: "w",
// EndpointID: "e",
// })
//
// returns
//
// ["/calico/v1/host/h/workload/o/w/endpoint",
//
// "/calico/v1/host/h/workload/o/w"]
//
// indicating that these paths should also be deleted when they are empty.
// In this example it is equivalent to deleting the workload when there are
// no more endpoints in the workload.
func KeyToDefaultDeleteParentPaths(key Key) ([]string, error) {
	return key.defaultDeleteParentPaths()
}

// ListOptionsToDefaultPathRoot converts list options struct into a
// common-prefix path suitable for querying a datastore that uses the paths
// returned by KeyToDefaultPath.
func ListOptionsToDefaultPathRoot(listOptions ListInterface) string {
	return listOptions.defaultPathRoot()
}

// ListOptionsIsFullyQualified returns true if the options actually specify a fully
// qualified resource rather than a partial match.
func ListOptionsIsFullyQualified(listOptions ListInterface) bool {
	// Construct the path prefix and then check to see if that actually corresponds to
	// the path of a resource instance.
	return listOptions.KeyFromDefaultPath(listOptions.defaultPathRoot()) != nil
}

// IsListOptionsLastSegmentPrefix returns true if the final segment of the default path
// root is a name prefix rather than the full name.
func IsListOptionsLastSegmentPrefix(listOptions ListInterface) bool {
	// Only supported for ResourceListOptions.
	rl, ok := listOptions.(ResourceListOptions)
	return ok && rl.IsLastSegmentIsPrefix()
}
// KeyFromDefaultPath parses the default path representation of a key into one
// of our <Type>Key structs.  Returns nil if the string doesn't match one of
// our key types.
func KeyFromDefaultPath(path string) Key {
	// "v3" resource keys strictly require a leading slash but older "v1" keys
	// were permissive.  For ease of parsing, strip the slash off here but pass
	// the original path down to keyFromDefaultPathInner so it can check for
	// the slash where required.
	parts := strings.Split(strings.TrimPrefix(path, "/"), "/")
	if len(parts) < 3 {
		// After removing the optional "/" prefix, a valid key has at least
		// three segments.
		return nil
	}
	return keyFromDefaultPathInner(path, parts)
}
// keyFromDefaultPathInner does the work of KeyFromDefaultPath after the path
// has been split into segments.  parts is the path split on "/" with any
// single leading "/" removed; path is the original, unmodified string (needed
// because some key types require the leading slash).  Returns nil for any
// path that doesn't correspond to a known key type.
func keyFromDefaultPathInner(path string, parts []string) Key {
	if parts[0] != "calico" {
		return nil
	}
	// Dispatch on the second path segment (the data-model family).
	switch parts[1] {
	case "v1":
		switch parts[2] {
		case "ipam":
			return IPPoolListOptions{}.KeyFromDefaultPath(path)
		case "config":
			// Config names may themselves contain "/", so rejoin the tail.
			return GlobalConfigKey{Name: strings.Join(parts[3:], "/")}
		case "host":
			if len(parts) < 5 {
				return nil
			}
			hostname := parts[3]
			switch parts[4] {
			case "workload":
				if len(parts) != 9 || parts[7] != "endpoint" {
					return nil
				}
				return WorkloadEndpointKey{
					Hostname:       unescapeName(hostname),
					OrchestratorID: unescapeName(parts[5]),
					WorkloadID:     unescapeName(parts[6]),
					EndpointID:     unescapeName(parts[8]),
				}
			case "endpoint":
				if len(parts) != 6 {
					return nil
				}
				return HostEndpointKey{
					Hostname:   unescapeName(hostname),
					EndpointID: unescapeName(parts[5]),
				}
			case "config":
				return HostConfigKey{
					Hostname: hostname,
					Name:     strings.Join(parts[5:], "/"),
				}
			case "metadata":
				if len(parts) != 5 {
					return nil
				}
				return HostMetadataKey{
					Hostname: hostname,
				}
			case "bird_ip":
				if len(parts) != 5 {
					return nil
				}
				return HostIPKey{
					Hostname: hostname,
				}
			case "wireguard":
				if len(parts) != 5 {
					return nil
				}
				return WireguardKey{
					NodeName: hostname,
				}
			}
		case "netset":
			if len(parts) != 4 {
				return nil
			}
			return NetworkSetKey{
				Name: unescapeName(parts[3]),
			}
		case "Ready":
			// The Ready flag requires the leading slash.
			if len(parts) > 3 || path[0] != '/' {
				return nil
			}
			return ReadyFlagKey{}
		case "policy":
			if len(parts) < 6 {
				return nil
			}
			switch parts[3] {
			case "tier":
				if len(parts) < 6 {
					return nil
				}
				switch parts[5] {
				case "policy":
					if len(parts) != 7 {
						return nil
					}
					return PolicyKey{
						Name: unescapeName(parts[6]),
					}
				}
			case "profile":
				pk := unescapeName(parts[4])
				switch parts[5] {
				case "rules":
					return ProfileRulesKey{ProfileKey: ProfileKey{pk}}
				case "labels":
					return ProfileLabelsKey{ProfileKey: ProfileKey{pk}}
				}
			}
		}
	case "bgp":
		switch parts[2] {
		case "v1":
			if len(parts) < 5 {
				return nil
			}
			switch parts[3] {
			case "global":
				return GlobalBGPConfigListOptions{}.KeyFromDefaultPath(path)
			case "host":
				if len(parts) < 6 {
					return nil
				}
				return NodeBGPConfigListOptions{}.KeyFromDefaultPath(path)
			}
		}
	case "ipam":
		if len(parts) < 5 {
			return nil
		}
		switch parts[2] {
		case "v2":
			switch parts[3] {
			case "assignment":
				return BlockListOptions{}.KeyFromDefaultPath(path)
			case "handle":
				if len(parts) > 5 {
					return nil
				}
				return IPAMHandleKey{
					HandleID: parts[4],
				}
			case "host":
				return BlockAffinityListOptions{}.KeyFromDefaultPath(path)
			}
		}
	case "resources":
		switch parts[2] {
		case "v3":
			// v3 resource keys strictly require the leading slash.
			if len(parts) < 6 || parts[3] != "projectcalico.org" || path[0] != '/' {
				return nil
			}
			// 6 segments => global resource, 7 => namespaced resource.
			switch len(parts) {
			case 6:
				ri, ok := resourceInfoByPlural[unescapeName(parts[4])]
				if !ok {
					log.Warnf("(BUG) unknown resource type: %v", path)
					return nil
				}
				if namespace.IsNamespaced(ri.kind) {
					log.Warnf("(BUG) Path is a global resource, but resource is namespaced: %v", path)
					return nil
				}
				log.Debugf("Path is a global resource: %v", path)
				return ResourceKey{
					Kind: ri.kind,
					Name: unescapeName(parts[5]),
				}
			case 7:
				ri, ok := resourceInfoByPlural[unescapeName(parts[4])]
				if !ok {
					log.Warnf("(BUG) unknown resource type: %v", path)
					return nil
				}
				if !namespace.IsNamespaced(ri.kind) {
					log.Warnf("(BUG) Path is a namespaced resource, but resource is global: %v", path)
					return nil
				}
				log.Debugf("Path is a namespaced resource: %v", path)
				return ResourceKey{
					Kind:      ri.kind,
					Namespace: unescapeName(parts[5]),
					Name:      unescapeName(parts[6]),
				}
			}
		}
	case "felix":
		if len(parts) < 4 {
			return nil
		}
		switch parts[2] {
		case "v1":
			switch parts[3] {
			case "host":
				if len(parts) != 7 || parts[5] != "endpoint" {
					return nil
				}
				return HostEndpointStatusKey{
					Hostname:   parts[4],
					EndpointID: unescapeName(parts[6]),
				}
			}
		case "v2":
			if len(parts) < 7 {
				return nil
			}
			if parts[4] != "host" {
				return nil
			}
			switch parts[6] {
			case "status":
				return ActiveStatusReportListOptions{}.KeyFromDefaultPath(path)
			case "last_reported_status":
				return LastStatusReportListOptions{}.KeyFromDefaultPath(path)
			case "workload":
				return WorkloadEndpointStatusListOptions{}.KeyFromDefaultPath(path)
			}
		}
	}
	log.Debugf("Path is unknown: %v", path)
	return nil
}
// OldKeyFromDefaultPath is the old, (slower) implementation of KeyFromDefaultPath. It is kept to allow
// fuzzing the new version against it. Parses the default path representation of a key into one
// of our <Type>Key structs. Returns nil if the string doesn't match one of
// our key types.
//
// It works by trying each key type's regex in turn, then falling back on the
// various ListOptions parsers; the match order below is significant.
func OldKeyFromDefaultPath(path string) Key {
	if m := matchWorkloadEndpoint.FindStringSubmatch(path); m != nil {
		log.Debugf("Path is a workload endpoint: %v", path)
		return WorkloadEndpointKey{
			Hostname:       unescapeName(m[1]),
			OrchestratorID: unescapeName(m[2]),
			WorkloadID:     unescapeName(m[3]),
			EndpointID:     unescapeName(m[4]),
		}
	} else if m := matchHostEndpoint.FindStringSubmatch(path); m != nil {
		log.Debugf("Path is a host endpoint: %v", path)
		return HostEndpointKey{
			Hostname:   unescapeName(m[1]),
			EndpointID: unescapeName(m[2]),
		}
	} else if m := matchNetworkSet.FindStringSubmatch(path); m != nil {
		log.Debugf("Path is a network set: %v", path)
		return NetworkSetKey{
			Name: unescapeName(m[1]),
		}
	} else if m := matchGlobalResource.FindStringSubmatch(path); m != nil {
		ri, ok := resourceInfoByPlural[unescapeName(m[1])]
		if !ok {
			log.Warnf("(BUG) unknown resource type: %v", path)
			return nil
		}
		if namespace.IsNamespaced(ri.kind) {
			log.Warnf("(BUG) Path is a global resource, but resource is namespaced: %v", path)
			return nil
		}
		log.Debugf("Path is a global resource: %v", path)
		return ResourceKey{
			Kind: ri.kind,
			Name: unescapeName(m[2]),
		}
	} else if m := matchNamespacedResource.FindStringSubmatch(path); m != nil {
		ri, ok := resourceInfoByPlural[unescapeName(m[1])]
		if !ok {
			log.Warnf("(BUG) unknown resource type: %v", path)
			return nil
		}
		if !namespace.IsNamespaced(ri.kind) {
			log.Warnf("(BUG) Path is a namespaced resource, but resource is global: %v", path)
			return nil
		}
		log.Debugf("Path is a namespaced resource: %v", path)
		return ResourceKey{
			Kind:      resourceInfoByPlural[unescapeName(m[1])].kind,
			Namespace: unescapeName(m[2]),
			Name:      unescapeName(m[3]),
		}
	} else if m := matchPolicy.FindStringSubmatch(path); m != nil {
		log.Debugf("Path is a policy: %v", path)
		return PolicyKey{
			Name: unescapeName(m[2]),
		}
	} else if m := matchProfile.FindStringSubmatch(path); m != nil {
		log.Debugf("Path is a profile: %v (%v)", path, m[2])
		pk := ProfileKey{unescapeName(m[1])}
		switch m[2] {
		case "rules":
			log.Debugf("Profile rules")
			return ProfileRulesKey{ProfileKey: pk}
		case "labels":
			log.Debugf("Profile labels")
			return ProfileLabelsKey{ProfileKey: pk}
		}
		return nil
	} else if m := matchHostIp.FindStringSubmatch(path); m != nil {
		log.Debugf("Path is a host ID: %v", path)
		return HostIPKey{Hostname: m[1]}
	} else if m := matchWireguard.FindStringSubmatch(path); m != nil {
		log.Debugf("Path is a node name: %v", path)
		return WireguardKey{NodeName: m[1]}
	} else if m := matchIPPool.FindStringSubmatch(path); m != nil {
		log.Debugf("Path is a pool: %v", path)
		// Pool paths store the CIDR's "/" as "-"; undo that before parsing.
		mungedCIDR := m[1]
		cidr := strings.Replace(mungedCIDR, "-", "/", 1)
		_, c, err := net.ParseCIDR(cidr)
		if err != nil {
			log.WithError(err).Warningf("Failed to parse CIDR %s", cidr)
		} else {
			return IPPoolKey{CIDR: *c}
		}
	} else if m := matchGlobalConfig.FindStringSubmatch(path); m != nil {
		log.Debugf("Path is a global felix config: %v", path)
		return GlobalConfigKey{Name: m[1]}
	} else if m := matchHostConfig.FindStringSubmatch(path); m != nil {
		log.Debugf("Path is a host config: %v", path)
		return HostConfigKey{Hostname: m[1], Name: m[2]}
	} else if matchReadyFlag.MatchString(path) {
		log.Debugf("Path is a ready flag: %v", path)
		return ReadyFlagKey{}
	} else if k := (NodeBGPConfigListOptions{}).KeyFromDefaultPath(path); k != nil {
		return k
	} else if k := (GlobalBGPConfigListOptions{}).KeyFromDefaultPath(path); k != nil {
		return k
	} else if k := (BlockAffinityListOptions{}).KeyFromDefaultPath(path); k != nil {
		return k
	} else if k := (BlockListOptions{}).KeyFromDefaultPath(path); k != nil {
		return k
	} else if k := (HostEndpointStatusListOptions{}).KeyFromDefaultPath(path); k != nil {
		return k
	} else if k := (WorkloadEndpointStatusListOptions{}).KeyFromDefaultPath(path); k != nil {
		return k
	} else if k := (ActiveStatusReportListOptions{}).KeyFromDefaultPath(path); k != nil {
		return k
	} else if k := (LastStatusReportListOptions{}).KeyFromDefaultPath(path); k != nil {
		return k
	} else {
		log.Debugf("Path is unknown: %v", path)
	}
	// Not a key we know about.
	return nil
}
// ParseValue parses the default JSON representation of our data into one of
// our value structs, according to the type of key. I.e. if passed a
// PolicyKey as the first parameter, it will try to parse rawData into a
// Policy struct.
func ParseValue(key Key, rawData []byte) (interface{}, error) {
	valueType, err := key.valueType()
	if err != nil {
		return nil, err
	}
	// The "raw" types are stored as bare (non-JSON) values; handle those
	// without invoking the JSON parser.
	if valueType == rawStringType {
		return string(rawData), nil
	}
	if valueType == rawBoolType {
		return string(rawData) == "true", nil
	}
	if valueType == rawIPType {
		ip := net2.ParseIP(string(rawData))
		if ip == nil {
			// Note: unparseable IPs yield (nil, nil), not an error.
			return nil, nil
		}
		return &net.IP{IP: ip}, nil
	}
	value := reflect.New(valueType)
	elem := value.Elem()
	if elem.Kind() == reflect.Struct && elem.NumField() > 0 {
		// If the value struct embeds its key type as the first field,
		// pre-populate it from the key.
		if elem.Field(0).Type() == reflect.ValueOf(key).Type() {
			elem.Field(0).Set(reflect.ValueOf(key))
		}
	}
	iface := value.Interface()
	err = json.Unmarshal(rawData, iface)
	if err != nil {
		// This is a special case to address backwards compatibility from the time when we had no state information as block affinity value.
		// example:
		// Key: "/calico/ipam/v2/host/myhost.io/ipv4/block/172.29.82.0-26"
		// Value: ""
		// In 3.0.7 we added block affinity state as the value, so old "" value is no longer a valid JSON, so for that
		// particular case we replace the "" with a "{}" so it can be parsed and we don't leak blocks after upgrade to Calico 3.0.7
		// See: https://github.com/projectcalico/calico/issues/1956
		if bytes.Equal(rawData, []byte(``)) && valueType == typeBlockAff {
			rawData = []byte(`{}`)
			if err = json.Unmarshal(rawData, iface); err != nil {
				return nil, err
			}
		} else {
			log.Warningf("Failed to unmarshal %#v into value %#v",
				string(rawData), value)
			return nil, err
		}
	}
	if elem.Kind() != reflect.Struct {
		// Pointer to a map or slice, unwrap.
		iface = elem.Interface()
	}
	return iface, nil
}
// SerializeValue serializes a value in the model to a []byte to be stored in
// the datastore.  This performs the opposite processing to ParseValue(): the
// "raw" types are written as bare strings, everything else as JSON.
func SerializeValue(d *KVPair) ([]byte, error) {
	valueType, err := d.Key.valueType()
	if err != nil {
		return nil, err
	}
	if d.Value == nil {
		return json.Marshal(nil)
	}
	switch valueType {
	case rawStringType:
		return []byte(d.Value.(string)), nil
	case rawBoolType, rawIPType:
		return []byte(fmt.Sprint(d.Value)), nil
	default:
		return json.Marshal(d.Value)
	}
}

View File

@@ -0,0 +1,19 @@
// Copyright (c) 2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
const (
	// KindKubernetesEndpointSlice is the model kind string used for
	// Kubernetes EndpointSlice resources.
	KindKubernetesEndpointSlice = "KubernetesEndpointSlice"
)

View File

@@ -0,0 +1,19 @@
// Copyright (c) 2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
const (
	// KindKubernetesNetworkPolicy is the backend-model kind string used for
	// Kubernetes NetworkPolicy resources; it is registered against the
	// apiv3.NetworkPolicy type via registerResourceInfo in this package.
	KindKubernetesNetworkPolicy = "KubernetesNetworkPolicy"
)

View File

@@ -0,0 +1,19 @@
// Copyright (c) 2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
const (
	// KindKubernetesService is the backend-model kind string used for
	// Kubernetes Service resources (registered against kapiv1.Service via
	// registerResourceInfo in this package).
	KindKubernetesService = "KubernetesService"
)

View File

@@ -0,0 +1,30 @@
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import "strings"
// escapeName makes a resource name safe for use as a single etcd path
// segment: "%" becomes "%25" and "/" becomes "%2f". The single-pass
// replacer never re-escapes the "%" it introduces, matching the original
// escape-%-before-/ ordering.
func escapeName(name string) string {
	return strings.NewReplacer("%", "%25", "/", "%2f").Replace(name)
}
// unescapeName reverses escapeName, turning "%2f" back into "/" and
// "%25" back into "%".
func unescapeName(name string) string {
	return strings.NewReplacer("%2f", "/", "%25", "%").Replace(name)
}

View File

@@ -0,0 +1,95 @@
// Copyright (c) 2017-2018 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"regexp"
"reflect"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
"github.com/projectcalico/calico/libcalico-go/lib/net"
)
var (
	// matchNetworkSet extracts the (escaped) network set name from its etcd path.
	matchNetworkSet = regexp.MustCompile("^/?calico/v1/netset/([^/]+)$")
	typeNetworkSet  = reflect.TypeOf(NetworkSet{})
)

// NetworkSetKey identifies a single v1-model NetworkSet by name.
type NetworkSetKey struct {
	Name string `json:"-" validate:"required,namespacedName"`
}

func (k NetworkSetKey) defaultPath() (string, error) {
	if k.Name == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
	}
	return "/calico/v1/netset/" + escapeName(k.Name), nil
}

func (k NetworkSetKey) defaultDeletePath() (string, error) {
	return k.defaultPath()
}

func (k NetworkSetKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

func (k NetworkSetKey) valueType() (reflect.Type, error) {
	return typeNetworkSet, nil
}

func (k NetworkSetKey) String() string {
	return "NetworkSet(name=" + k.Name + ")"
}
// NetworkSetListOptions selects v1-model NetworkSets, optionally restricted
// to a single name.
type NetworkSetListOptions struct {
	Name string
}

func (options NetworkSetListOptions) defaultPathRoot() string {
	root := "/calico/v1/netset"
	if options.Name != "" {
		root += "/" + escapeName(options.Name)
	}
	return root
}

// KeyFromDefaultPath converts an etcd path back into a NetworkSetKey,
// returning nil when the path does not match or fails the name filter.
func (options NetworkSetListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get NetworkSet key from %s", path)
	m := matchNetworkSet.FindStringSubmatch(path)
	if m == nil {
		log.Debugf("Didn't match regex")
		return nil
	}
	name := unescapeName(m[1])
	if options.Name != "" && name != options.Name {
		log.Debugf("Didn't match name %s != %s", options.Name, name)
		return nil
	}
	return NetworkSetKey{Name: name}
}

// NetworkSet is the v1-model representation of a network set.
type NetworkSet struct {
	Nets       []net.IPNet       `json:"nets,omitempty" validate:"omitempty,dive,cidr"`
	Labels     map[string]string `json:"labels,omitempty" validate:"omitempty,labels"`
	ProfileIDs []string          `json:"profile_ids,omitempty" validate:"omitempty,dive,name"`
}

View File

@@ -0,0 +1,282 @@
// Copyright (c) 2016,2020 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
goerrors "errors"
"fmt"
"regexp"
"reflect"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/api/pkg/lib/numorstring"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
"github.com/projectcalico/calico/libcalico-go/lib/net"
)
var (
	typeNode         = reflect.TypeOf(Node{})
	typeHostMetadata = reflect.TypeOf(HostMetadata{})
	typeOrchRefs     = reflect.TypeOf([]OrchRef{})
	// Host IPs are stored in raw (non-JSON) string form — see rawIPType handling
	// in SerializeValue/ParseValue.
	typeHostIp    = rawIPType
	typeWireguard = reflect.TypeOf(Wireguard{})

	// Regexes extracting the hostname from each per-host etcd path.
	matchHostMetadata = regexp.MustCompile(`^/?calico/v1/host/([^/]+)/metadata$`)
	matchHostIp       = regexp.MustCompile(`^/?calico/v1/host/([^/]+)/bird_ip$`)
	matchWireguard    = regexp.MustCompile(`^/?calico/v1/host/([^/]+)/wireguard$`)
)

// Node is the v1-model representation of a Calico node. It is a composite
// type assembled from several per-host datastore entries (see NodeKey below,
// whose path methods return errors for that reason).
type Node struct {
	// Felix specific configuration
	FelixIPv4 *net.IP

	// Node specific labels
	Labels map[string]string `json:"labels,omitempty"`

	// BGP specific configuration
	BGPIPv4Addr *net.IP
	BGPIPv6Addr *net.IP
	BGPIPv4Net  *net.IPNet
	BGPIPv6Net  *net.IPNet
	BGPASNumber *numorstring.ASNumber
	OrchRefs    []OrchRef `json:"orchRefs,omitempty"`
}

// OrchRef records this node's identity as seen by one orchestrator.
type OrchRef struct {
	Orchestrator string `json:"orchestrator,omitempty"`
	NodeName     string `json:"nodeName,omitempty"`
}

// Wireguard carries a node's WireGuard interface addresses and public keys
// (IPv4 and IPv6 variants).
type Wireguard struct {
	InterfaceIPv4Addr *net.IP `json:"interfaceIPv4Addr,omitempty"`
	PublicKey         string  `json:"publicKey,omitempty"`
	InterfaceIPv6Addr *net.IP `json:"interfaceIPv6Addr,omitempty"`
	PublicKeyV6       string  `json:"publicKeyV6,omitempty"`
}
// NodeKey identifies a Node. Since Node is a composite of several datastore
// entries, the single-path methods all return errors.
type NodeKey struct {
	Hostname string
}

func (k NodeKey) defaultPath() (string, error) {
	return "", goerrors.New("Node is a composite type, so not handled with a single path")
}

func (k NodeKey) defaultDeletePath() (string, error) {
	return "", goerrors.New("Node is a composite type, so not handled with a single path")
}

func (k NodeKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, goerrors.New("Node is composite type, so not handled with a single path")
}

func (k NodeKey) valueType() (reflect.Type, error) {
	return typeNode, nil
}

func (k NodeKey) String() string {
	return "Node(name=" + k.Hostname + ")"
}

// NodeListOptions carries the optional hostname filter for node enumeration.
// Both path-handling methods are stubs: no default path root, and no key can
// be derived from a path.
type NodeListOptions struct {
	Hostname string
}

func (options NodeListOptions) defaultPathRoot() string {
	return ""
}

func (options NodeListOptions) KeyFromDefaultPath(path string) Key {
	return nil
}
// The node is a composite of the following subcomponents:
//   - The host metadata. This is the primary subcomponent and is used to enumerate
//     hosts. However, for backwards compatibility, the etcd driver needs to handle
//     that this may not exist, and instead need to enumerate based on directory.
//   - The host IPv4 address used by Calico to lock down IPIP traffic.
//   - The BGP IPv4 and IPv6 addresses
//   - The BGP ASN.
type HostMetadata struct {
}

// HostMetadataKey identifies the metadata entry for a single host.
type HostMetadataKey struct {
	Hostname string
}

func (k HostMetadataKey) defaultPath() (string, error) {
	if k.Hostname == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
	}
	return "/calico/v1/host/" + k.Hostname + "/metadata", nil
}

// defaultDeletePath deliberately returns the host's parent directory rather
// than the metadata leaf, so deletion covers the whole per-host subtree.
func (k HostMetadataKey) defaultDeletePath() (string, error) {
	if k.Hostname == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
	}
	return "/calico/v1/host/" + k.Hostname, nil
}

func (k HostMetadataKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

func (k HostMetadataKey) valueType() (reflect.Type, error) {
	return typeHostMetadata, nil
}

func (k HostMetadataKey) String() string {
	return "Node(name=" + k.Hostname + ")"
}

// HostMetadataListOptions lists host metadata, optionally for a single host.
type HostMetadataListOptions struct {
	Hostname string
}

func (options HostMetadataListOptions) defaultPathRoot() string {
	if options.Hostname != "" {
		return "/calico/v1/host/" + options.Hostname + "/metadata"
	}
	return "/calico/v1/host"
}

func (options HostMetadataListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get Node key from %s", path)
	m := matchHostMetadata.FindStringSubmatch(path)
	if m == nil {
		log.Debugf("%s didn't match regex", path)
		return nil
	}
	return HostMetadataKey{Hostname: m[1]}
}
// The Felix Host IP Key.
type HostIPKey struct {
Hostname string
}
func (key HostIPKey) defaultPath() (string, error) {
return fmt.Sprintf("/calico/v1/host/%s/bird_ip",
key.Hostname), nil
}
func (key HostIPKey) defaultDeletePath() (string, error) {
return key.defaultPath()
}
func (key HostIPKey) defaultDeleteParentPaths() ([]string, error) {
return nil, nil
}
func (key HostIPKey) valueType() (reflect.Type, error) {
return typeHostIp, nil
}
func (key HostIPKey) String() string {
return fmt.Sprintf("Node(name=%s)", key.Hostname)
}
// OrchRefKey identifies the list of orchestrator references for one node.
type OrchRefKey struct {
	Hostname string
}

func (k OrchRefKey) defaultPath() (string, error) {
	return "/calico/v1/host/" + k.Hostname + "/orchestrator_refs", nil
}

func (k OrchRefKey) defaultDeletePath() (string, error) {
	return k.defaultPath()
}

func (k OrchRefKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

func (k OrchRefKey) valueType() (reflect.Type, error) {
	return typeOrchRefs, nil
}

func (k OrchRefKey) String() string {
	return "OrchRefs(nodename=" + k.Hostname + ")"
}

// OrchRefListOptions lists orchestrator refs for one node. The key is
// reconstructed from the options themselves, not parsed out of the path.
type OrchRefListOptions struct {
	Hostname string
}

func (options OrchRefListOptions) defaultPathRoot() string {
	return "/calico/v1/host/" + options.Hostname + "/orchestrator_refs"
}

func (options OrchRefListOptions) KeyFromDefaultPath(path string) Key {
	return OrchRefKey{Hostname: options.Hostname}
}
// The Felix Wireguard Key.
type WireguardKey struct {
	NodeName string
}

func (k WireguardKey) defaultPath() (string, error) {
	if k.NodeName == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
	}
	return "/calico/v1/host/" + k.NodeName + "/wireguard", nil
}

func (k WireguardKey) defaultDeletePath() (string, error) {
	return k.defaultPath()
}

func (k WireguardKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

func (k WireguardKey) valueType() (reflect.Type, error) {
	return typeWireguard, nil
}

func (k WireguardKey) String() string {
	return "Node(nodename=" + k.NodeName + ")"
}

// WireguardListOptions lists per-node WireGuard entries, optionally limited
// to a single node.
type WireguardListOptions struct {
	NodeName string
}

func (options WireguardListOptions) defaultPathRoot() string {
	if options.NodeName != "" {
		return "/calico/v1/host/" + options.NodeName + "/wireguard"
	}
	return "/calico/v1/host"
}

func (options WireguardListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get Node key from %s", path)
	m := matchWireguard.FindStringSubmatch(path)
	if m == nil {
		log.Debugf("%s didn't match regex", path)
		return nil
	}
	return WireguardKey{NodeName: m[1]}
}

View File

@@ -0,0 +1,126 @@
// Copyright (c) 2016-2018 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"regexp"
"reflect"
"strings"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
)
var (
	// matchPolicy captures the tier and the (escaped) policy name from a
	// v1 policy path.
	matchPolicy = regexp.MustCompile("^/?calico/v1/policy/tier/([^/]+)/policy/([^/]+)$")
	typePolicy  = reflect.TypeOf(Policy{})
)

// PolicyKey identifies a v1-model policy; paths always use the "default" tier.
type PolicyKey struct {
	Name string `json:"-" validate:"required,name"`
}

func (k PolicyKey) defaultPath() (string, error) {
	if k.Name == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
	}
	return "/calico/v1/policy/tier/default/policy/" + escapeName(k.Name), nil
}

func (k PolicyKey) defaultDeletePath() (string, error) {
	return k.defaultPath()
}

func (k PolicyKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

func (k PolicyKey) valueType() (reflect.Type, error) {
	return typePolicy, nil
}

func (k PolicyKey) String() string {
	return "Policy(name=" + k.Name + ")"
}
// PolicyListOptions selects v1-model policies, optionally by exact name.
type PolicyListOptions struct {
	Name string
}

func (options PolicyListOptions) defaultPathRoot() string {
	root := "/calico/v1/policy/tier/default/policy"
	if options.Name != "" {
		root += "/" + escapeName(options.Name)
	}
	return root
}

// KeyFromDefaultPath parses a policy path back into a PolicyKey. The second
// capture group is the (escaped) policy name; the first is the tier.
func (options PolicyListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get Policy key from %s", path)
	m := matchPolicy.FindStringSubmatch(path)
	if m == nil {
		log.Debugf("Didn't match regex")
		return nil
	}
	name := unescapeName(m[2])
	if options.Name != "" && name != options.Name {
		log.Debugf("Didn't match name %s != %s", options.Name, name)
		return nil
	}
	return PolicyKey{Name: name}
}
// Policy is the v1-model representation of a policy.
type Policy struct {
	Namespace      string            `json:"namespace,omitempty" validate:"omitempty"`
	Order          *float64          `json:"order,omitempty" validate:"omitempty"`
	InboundRules   []Rule            `json:"inbound_rules,omitempty" validate:"omitempty,dive"`
	OutboundRules  []Rule            `json:"outbound_rules,omitempty" validate:"omitempty,dive"`
	Selector       string            `json:"selector" validate:"selector"`
	DoNotTrack     bool              `json:"untracked,omitempty"`
	Annotations    map[string]string `json:"annotations,omitempty"`
	PreDNAT        bool              `json:"pre_dnat,omitempty"`
	ApplyOnForward bool              `json:"apply_on_forward,omitempty"`
	Types          []string          `json:"types,omitempty"`
}

// String renders the policy as a comma-separated list of field summaries;
// order is included only when set.
func (p Policy) String() string {
	// joinRules renders each rule and joins them with ";".
	joinRules := func(rules []Rule) string {
		strs := make([]string, len(rules))
		for i, r := range rules {
			strs[i] = r.String()
		}
		return strings.Join(strs, ";")
	}
	var parts []string
	if p.Order != nil {
		parts = append(parts, fmt.Sprintf("order:%v", *p.Order))
	}
	parts = append(parts,
		fmt.Sprintf("selector:%#v", p.Selector),
		fmt.Sprintf("inbound:%v", joinRules(p.InboundRules)),
		fmt.Sprintf("outbound:%v", joinRules(p.OutboundRules)),
		fmt.Sprintf("untracked:%v", p.DoNotTrack),
		fmt.Sprintf("pre_dnat:%v", p.PreDNAT),
		fmt.Sprintf("apply_on_forward:%v", p.ApplyOnForward),
		fmt.Sprintf("types:%v", strings.Join(p.Types, ";")),
	)
	return strings.Join(parts, ",")
}

View File

@@ -0,0 +1,208 @@
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"regexp"
"reflect"
"sort"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
)
var (
	// matchProfile captures the (escaped) profile name and which sub-entry
	// ("rules" or "labels") a v1 profile path refers to.
	matchProfile = regexp.MustCompile("^/?calico/v1/policy/profile/([^/]+)/(rules|labels)$")
	typeProfile  = reflect.TypeOf(Profile{})
)

// The profile key actually returns the common parent of the three separate entries.
// It is useful to define this to re-use some of the common machinery, and can be used
// for delete processing since delete needs to remove the common parent.
type ProfileKey struct {
	Name string `json:"-" validate:"required,name"`
}

func (k ProfileKey) defaultPath() (string, error) {
	if k.Name == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
	}
	return "/calico/v1/policy/profile/" + escapeName(k.Name), nil
}

func (k ProfileKey) defaultDeletePath() (string, error) {
	return k.defaultPath()
}

func (k ProfileKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

func (k ProfileKey) valueType() (reflect.Type, error) {
	return typeProfile, nil
}

func (k ProfileKey) String() string {
	return "Profile(name=" + k.Name + ")"
}
// ProfileRulesKey implements the KeyInterface for the profile rules
type ProfileRulesKey struct {
	ProfileKey
}

func (key ProfileRulesKey) defaultPath() (string, error) {
	base, err := key.ProfileKey.defaultPath()
	return base + "/rules", err
}

func (key ProfileRulesKey) valueType() (reflect.Type, error) {
	return reflect.TypeOf(ProfileRules{}), nil
}

func (key ProfileRulesKey) String() string {
	return "ProfileRules(name=" + key.Name + ")"
}

// ProfileLabelsKey implements the KeyInterface for the profile labels
type ProfileLabelsKey struct {
	ProfileKey
}

func (key ProfileLabelsKey) defaultPath() (string, error) {
	base, err := key.ProfileKey.defaultPath()
	return base + "/labels", err
}

func (key ProfileLabelsKey) valueType() (reflect.Type, error) {
	return reflect.TypeOf(map[string]string{}), nil
}

func (key ProfileLabelsKey) String() string {
	return "ProfileLabels(name=" + key.Name + ")"
}
// ProfileListOptions selects v1-model profiles, optionally by exact name.
type ProfileListOptions struct {
	Name string
}

func (options ProfileListOptions) defaultPathRoot() string {
	root := "/calico/v1/policy/profile"
	if options.Name != "" {
		root += "/" + escapeName(options.Name)
	}
	return root
}

// KeyFromDefaultPath maps a profile sub-entry path to a ProfileLabelsKey or
// ProfileRulesKey depending on the trailing path segment.
func (options ProfileListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get Profile key from %s", path)
	m := matchProfile.FindStringSubmatch(path)
	if m == nil {
		log.Debugf("Didn't match regex")
		return nil
	}
	name := unescapeName(m[1])
	if options.Name != "" && name != options.Name {
		log.Debugf("Didn't match name %s != %s", options.Name, name)
		return nil
	}
	pk := ProfileKey{Name: name}
	switch m[2] {
	case "labels":
		return ProfileLabelsKey{ProfileKey: pk}
	case "rules":
		return ProfileRulesKey{ProfileKey: pk}
	}
	return pk
}
// The profile structure is defined to allow the client to define a conversion interface
// to map between the API and backend profiles. However, in the actual underlying
// implementation the profile is written as three separate entries - rules, tags and labels.
type Profile struct {
	Rules  ProfileRules      // the <profile>/rules entry
	Tags   []string          // not populated by ListConvert below
	Labels map[string]string // the <profile>/labels entry
}

// ProfileRules carries a profile's inbound and outbound rule lists.
type ProfileRules struct {
	InboundRules  []Rule `json:"inbound_rules,omitempty" validate:"omitempty,dive"`
	OutboundRules []Rule `json:"outbound_rules,omitempty" validate:"omitempty,dive"`
}
// ListConvert merges the raw per-profile KVPairs (separate labels and rules
// entries) into one KVPair per profile whose value is a complete *Profile.
// Results are sorted by profile name. It panics on any key or value type
// other than the labels/rules pairs produced by ProfileListOptions.
func (_ *ProfileListOptions) ListConvert(ds []*KVPair) []*KVPair {
	profiles := make(map[string]*KVPair)
	var name string
	for _, d := range ds {
		// Extract the profile name from either sub-entry key type.
		switch t := d.Key.(type) {
		case ProfileLabelsKey:
			name = t.Name
		case ProfileRulesKey:
			name = t.Name
		default:
			panic(fmt.Errorf("Unexpected key type: %v", t))
		}

		// Get the KVPair for the profile, initialising if just created.
		pd, ok := profiles[name]
		if !ok {
			log.Debugf("Initialise profile %v", name)
			pd = &KVPair{
				Value: &Profile{},
				Key:   ProfileKey{Name: name},
			}
			profiles[name] = pd
		}

		// Fold this entry's value into the aggregated Profile; the value's
		// dynamic type tells us which sub-entry it came from.
		p := pd.Value.(*Profile)
		switch t := d.Value.(type) {
		case map[string]string: // must be labels
			log.Debugf("Store labels %v", t)
			p.Labels = t
		case *ProfileRules: // must be rules
			log.Debugf("Store rules %v", t)
			p.Rules = *t
		default:
			panic(fmt.Errorf("Unexpected type: %v", t))
		}
		pd.Value = p
	}
	log.Debugf("Map of profiles: %v", profiles)

	// To store the keys in slice in sorted order
	var keys []string
	for k := range profiles {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	out := make([]*KVPair, len(keys))
	for i, k := range keys {
		out[i] = profiles[k]
	}
	log.Debugf("Sorted groups of profiles: %v", out)
	return out
}

View File

@@ -0,0 +1,34 @@
// Copyright (c) 2018 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import "fmt"
const (
	// NoRegion is the region string produced when no region is specified.
	NoRegion string = "no-region"
	// RegionPrefix prefixes a real region name in its string form.
	RegionPrefix string = "region-"
)

// RegionString returns the canonical string form of a region name:
// RegionPrefix + region when non-empty, NoRegion otherwise.
func RegionString(region string) string {
	if region == "" {
		return NoRegion
	}
	return RegionPrefix + region
}
// ErrorSlashInRegionString builds the error reported when a region string
// contains a slash (which would break its use as a path segment).
func ErrorSlashInRegionString(regionString string) error {
	return fmt.Errorf("RegionString %s is invalid because it includes a slash", regionString)
}

View File

@@ -0,0 +1,321 @@
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"reflect"
"regexp"
"strings"
log "github.com/sirupsen/logrus"
kapiv1 "k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1"
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
libapiv3 "github.com/projectcalico/calico/libcalico-go/lib/apis/v3"
"github.com/projectcalico/calico/libcalico-go/lib/namespace"
)
// resourceInfo holds the name/type information for a single resource kind.
type resourceInfo struct {
	typeOf    reflect.Type
	plural    string
	kindLower string
	kind      string
}

var (
	matchGlobalResource     = regexp.MustCompile("^/calico/resources/v3/projectcalico[.]org/([^/]+)/([^/]+)$")
	matchNamespacedResource = regexp.MustCompile("^/calico/resources/v3/projectcalico[.]org/([^/]+)/([^/]+)/([^/]+)$")
	resourceInfoByKindLower = make(map[string]resourceInfo)
	resourceInfoByPlural    = make(map[string]resourceInfo)
)

// registerResourceInfo records one resource's kind/plural/type triple so it
// can be looked up by lowercased kind or by plural. The plural is stored
// lowercased; the original kind casing is preserved in the entry.
func registerResourceInfo(kind string, plural string, typeOf reflect.Type) {
	ri := resourceInfo{
		typeOf:    typeOf,
		kind:      kind,
		kindLower: strings.ToLower(kind),
		plural:    strings.ToLower(plural),
	}
	resourceInfoByKindLower[ri.kindLower] = ri
	resourceInfoByPlural[ri.plural] = ri
}
// init registers every resource kind handled by the v3 backend model, mapping
// kind and plural names to their Go types for path construction and value
// parsing. Plurals are lowercased by registerResourceInfo before storage.
func init() {
	registerResourceInfo(
		apiv3.KindBGPPeer,
		"bgppeers",
		reflect.TypeOf(apiv3.BGPPeer{}),
	)
	registerResourceInfo(
		apiv3.KindBGPConfiguration,
		"bgpconfigurations",
		reflect.TypeOf(apiv3.BGPConfiguration{}),
	)
	registerResourceInfo(
		apiv3.KindClusterInformation,
		"clusterinformations",
		reflect.TypeOf(apiv3.ClusterInformation{}),
	)
	registerResourceInfo(
		apiv3.KindFelixConfiguration,
		"felixconfigurations",
		reflect.TypeOf(apiv3.FelixConfiguration{}),
	)
	registerResourceInfo(
		apiv3.KindGlobalNetworkPolicy,
		"globalnetworkpolicies",
		reflect.TypeOf(apiv3.GlobalNetworkPolicy{}),
	)
	registerResourceInfo(
		apiv3.KindHostEndpoint,
		"hostendpoints",
		reflect.TypeOf(apiv3.HostEndpoint{}),
	)
	registerResourceInfo(
		apiv3.KindGlobalNetworkSet,
		"globalnetworksets",
		reflect.TypeOf(apiv3.GlobalNetworkSet{}),
	)
	registerResourceInfo(
		apiv3.KindIPPool,
		"ippools",
		reflect.TypeOf(apiv3.IPPool{}),
	)
	registerResourceInfo(
		apiv3.KindIPReservation,
		"ipreservations",
		reflect.TypeOf(apiv3.IPReservation{}),
	)
	registerResourceInfo(
		apiv3.KindNetworkPolicy,
		"networkpolicies",
		reflect.TypeOf(apiv3.NetworkPolicy{}),
	)
	registerResourceInfo(
		KindKubernetesNetworkPolicy,
		"kubernetesnetworkpolicies",
		reflect.TypeOf(apiv3.NetworkPolicy{}),
	)
	registerResourceInfo(
		KindKubernetesEndpointSlice,
		"kubernetesendpointslices",
		reflect.TypeOf(discovery.EndpointSlice{}),
	)
	registerResourceInfo(
		apiv3.KindNetworkSet,
		"networksets",
		reflect.TypeOf(apiv3.NetworkSet{}),
	)
	registerResourceInfo(
		libapiv3.KindNode,
		"nodes",
		reflect.TypeOf(libapiv3.Node{}),
	)
	registerResourceInfo(
		apiv3.KindCalicoNodeStatus,
		"caliconodestatuses",
		reflect.TypeOf(apiv3.CalicoNodeStatus{}),
	)
	registerResourceInfo(
		apiv3.KindProfile,
		"profiles",
		reflect.TypeOf(apiv3.Profile{}),
	)
	registerResourceInfo(
		libapiv3.KindWorkloadEndpoint,
		"workloadendpoints",
		reflect.TypeOf(libapiv3.WorkloadEndpoint{}),
	)
	registerResourceInfo(
		libapiv3.KindIPAMConfig,
		"ipamconfigs",
		reflect.TypeOf(libapiv3.IPAMConfig{}),
	)
	registerResourceInfo(
		apiv3.KindKubeControllersConfiguration,
		"kubecontrollersconfigurations",
		reflect.TypeOf(apiv3.KubeControllersConfiguration{}))
	registerResourceInfo(
		KindKubernetesService,
		// NOTE(review): singular where every other entry is plural; kept as-is
		// because the stored plural is used to build datastore paths.
		"kubernetesservice",
		reflect.TypeOf(kapiv1.Service{}),
	)
	registerResourceInfo(
		libapiv3.KindBlockAffinity,
		"blockaffinities",
		reflect.TypeOf(libapiv3.BlockAffinity{}),
	)
	registerResourceInfo(
		apiv3.KindBGPFilter,
		// Consistency fix: was "BGPFilters"; registerResourceInfo lowercases
		// plurals, so the registered value ("bgpfilters") is unchanged.
		"bgpfilters",
		reflect.TypeOf(apiv3.BGPFilter{}),
	)
}
// ResourceKey identifies a single v3 resource by kind, name and (for
// namespaced kinds) namespace.
type ResourceKey struct {
	// The name of the resource.
	Name string
	// The namespace of the resource. Not required if the resource is not namespaced.
	Namespace string
	// The resource kind.
	Kind string
}
func (key ResourceKey) defaultPath() (string, error) {
	return key.defaultDeletePath()
}

// defaultDeletePath returns the v3 resource path for this key: namespaced
// kinds include the namespace segment, global kinds do not.
func (key ResourceKey) defaultDeletePath() (string, error) {
	ri, ok := resourceInfoByKindLower[strings.ToLower(key.Kind)]
	if !ok {
		return "", fmt.Errorf("couldn't convert key: %+v", key)
	}
	if namespace.IsNamespaced(key.Kind) {
		return fmt.Sprintf("/calico/resources/v3/projectcalico.org/%s/%s/%s", ri.plural, key.Namespace, key.Name), nil
	}
	return fmt.Sprintf("/calico/resources/v3/projectcalico.org/%s/%s", ri.plural, key.Name), nil
}

func (key ResourceKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the registered Go type for this key's kind.
func (key ResourceKey) valueType() (reflect.Type, error) {
	ri, ok := resourceInfoByKindLower[strings.ToLower(key.Kind)]
	if !ok {
		// BUG FIX: the format string was previously built by concatenating
		// key.Kind into it ("Unexpected resource kind: " + key.Kind), which
		// misrenders any '%' in the kind and trips go vet's printf check;
		// pass the kind as a format argument instead.
		return nil, fmt.Errorf("Unexpected resource kind: %s", key.Kind)
	}
	return ri.typeOf, nil
}

func (key ResourceKey) String() string {
	if namespace.IsNamespaced(key.Kind) {
		return fmt.Sprintf("%s(%s/%s)", key.Kind, key.Namespace, key.Name)
	}
	return fmt.Sprintf("%s(%s)", key.Kind, key.Name)
}
// ResourceListOptions selects v3 resources of one kind, optionally filtered
// by namespace and by exact or prefix name.
type ResourceListOptions struct {
	// The name of the resource.
	Name string
	// The namespace of the resource. Not required if the resource is not namespaced.
	Namespace string
	// The resource kind.
	Kind string
	// Whether the name is prefix rather than the full name.
	Prefix bool
}

// If the Kind, Namespace and Name are specified, but the Name is a prefix then the
// last segment of this path is a prefix.
func (options ResourceListOptions) IsLastSegmentIsPrefix() bool {
	if !options.Prefix || len(options.Kind) == 0 || len(options.Name) == 0 {
		return false
	}
	return len(options.Namespace) != 0 || !namespace.IsNamespaced(options.Kind)
}
// KeyFromDefaultPath parses a v3 resource path into a ResourceKey for the
// kind selected by the options, applying the namespace and (exact or prefix)
// name filters; it returns nil when the path does not match. It panics if
// options.Kind is not a registered kind.
func (options ResourceListOptions) KeyFromDefaultPath(path string) Key {
	ri, ok := resourceInfoByKindLower[strings.ToLower(options.Kind)]
	if !ok {
		log.Panic("Unexpected resource kind: " + options.Kind)
	}
	if namespace.IsNamespaced(options.Kind) {
		log.Debugf("Get Namespaced Resource key from %s", path)
		r := matchNamespacedResource.FindAllStringSubmatch(path, -1)
		if len(r) != 1 {
			log.Debugf("Didn't match regex")
			return nil
		}
		kindPlural := r[0][1]
		namespace := r[0][2]
		name := r[0][3]
		if len(options.Kind) == 0 {
			panic("Kind must be specified in List option but is not")
		}
		if kindPlural != ri.plural {
			// BUG FIX: this log line previously printed kindPlural twice;
			// print the expected plural (ri.plural), matching the global
			// branch below.
			log.Debugf("Didn't match kind %s != %s", kindPlural, ri.plural)
			return nil
		}
		if len(options.Namespace) != 0 && namespace != options.Namespace {
			log.Debugf("Didn't match namespace %s != %s", options.Namespace, namespace)
			return nil
		}
		if len(options.Name) != 0 {
			if options.Prefix && !strings.HasPrefix(name, options.Name) {
				log.Debugf("Didn't match name prefix %s != prefix(%s)", options.Name, name)
				return nil
			} else if !options.Prefix && name != options.Name {
				log.Debugf("Didn't match name %s != %s", options.Name, name)
				return nil
			}
		}
		return ResourceKey{Kind: options.Kind, Namespace: namespace, Name: name}
	}
	log.Debugf("Get Global Resource key from %s", path)
	r := matchGlobalResource.FindAllStringSubmatch(path, -1)
	if len(r) != 1 {
		log.Debugf("Didn't match regex")
		return nil
	}
	kindPlural := r[0][1]
	name := r[0][2]
	if kindPlural != ri.plural {
		log.Debugf("Didn't match kind %s != %s", kindPlural, ri.plural)
		return nil
	}
	if len(options.Name) != 0 {
		if options.Prefix && !strings.HasPrefix(name, options.Name) {
			log.Debugf("Didn't match name prefix %s != prefix(%s)", options.Name, name)
			return nil
		} else if !options.Prefix && name != options.Name {
			log.Debugf("Didn't match name %s != %s", options.Name, name)
			return nil
		}
	}
	return ResourceKey{Kind: options.Kind, Name: name}
}
// defaultPathRoot returns the deepest list prefix determined by the options:
// .../<plural>[/<namespace>][/<name>]. For a namespaced kind with no
// namespace set, the name is ignored. Panics on an unknown kind.
func (options ResourceListOptions) defaultPathRoot() string {
	ri, ok := resourceInfoByKindLower[strings.ToLower(options.Kind)]
	if !ok {
		log.Panic("Unexpected resource kind: " + options.Kind)
	}
	segments := []string{"/calico/resources/v3/projectcalico.org/" + ri.plural}
	if namespace.IsNamespaced(options.Kind) {
		if options.Namespace == "" {
			return segments[0]
		}
		segments = append(segments, options.Namespace)
	}
	if options.Name != "" {
		segments = append(segments, options.Name)
	}
	return strings.Join(segments, "/")
}

// String renders the options as just the resource kind.
func (options ResourceListOptions) String() string {
	return options.Kind
}

View File

@@ -0,0 +1,268 @@
// Copyright (c) 2016-2018 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"strconv"
"strings"
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/api/pkg/lib/numorstring"
"github.com/projectcalico/calico/libcalico-go/lib/net"
)
// Rule is the v1 datamodel representation of a single policy/profile rule.
// Fields whose JSON tag begins with "!" are negated match criteria.  The
// singular *Net fields are deprecated in favour of the plural *Nets forms;
// the All*Nets helpers below merge the two.
type Rule struct {
	// Action is the rule action ("Allow", "Deny", ...); empty is treated as
	// "Allow" by String() below.
	Action string `json:"action,omitempty"`

	IPVersion *int `json:"ip_version,omitempty" validate:"omitempty,ipVersion"`

	Protocol    *numorstring.Protocol `json:"protocol,omitempty" validate:"omitempty"`
	NotProtocol *numorstring.Protocol `json:"!protocol,omitempty" validate:"omitempty"`

	// ICMP validation notes: 0 is a valid (common) ICMP type and code. Type = 255 is not assigned
	// to any protocol and the Linux kernel doesn't support matching on it so we validate against
	// it.
	ICMPType    *int `json:"icmp_type,omitempty" validate:"omitempty,gte=0,lt=255"`
	ICMPCode    *int `json:"icmp_code,omitempty" validate:"omitempty,gte=0,lte=255"`
	NotICMPType *int `json:"!icmp_type,omitempty" validate:"omitempty,gte=0,lt=255"`
	NotICMPCode *int `json:"!icmp_code,omitempty" validate:"omitempty,gte=0,lte=255"`

	// Source match criteria.
	SrcTag              string             `json:"src_tag,omitempty" validate:"omitempty,tag"`
	SrcNet              *net.IPNet         `json:"src_net,omitempty" validate:"omitempty"`
	SrcNets             []*net.IPNet       `json:"src_nets,omitempty" validate:"omitempty"`
	SrcSelector         string             `json:"src_selector,omitempty" validate:"omitempty,selector"`
	SrcPorts            []numorstring.Port `json:"src_ports,omitempty" validate:"omitempty,dive"`
	SrcService          string             `json:"src_service,omitempty" validate:"omitempty"`
	SrcServiceNamespace string             `json:"src_service_ns,omitempty" validate:"omitempty"`

	// Destination match criteria.
	DstTag              string             `json:"dst_tag,omitempty" validate:"omitempty,tag"`
	DstSelector         string             `json:"dst_selector,omitempty" validate:"omitempty,selector"`
	DstNet              *net.IPNet         `json:"dst_net,omitempty" validate:"omitempty"`
	DstNets             []*net.IPNet       `json:"dst_nets,omitempty" validate:"omitempty"`
	DstPorts            []numorstring.Port `json:"dst_ports,omitempty" validate:"omitempty,dive"`
	DstService          string             `json:"dst_service,omitempty" validate:"omitempty"`
	DstServiceNamespace string             `json:"dst_service_ns,omitempty" validate:"omitempty"`

	// Negated source match criteria.
	NotSrcTag      string             `json:"!src_tag,omitempty" validate:"omitempty,tag"`
	NotSrcNet      *net.IPNet         `json:"!src_net,omitempty" validate:"omitempty"`
	NotSrcNets     []*net.IPNet       `json:"!src_nets,omitempty" validate:"omitempty"`
	NotSrcSelector string             `json:"!src_selector,omitempty" validate:"omitempty,selector"`
	NotSrcPorts    []numorstring.Port `json:"!src_ports,omitempty" validate:"omitempty,dive"`

	// Negated destination match criteria.
	NotDstTag      string             `json:"!dst_tag,omitempty" validate:"omitempty"`
	NotDstSelector string             `json:"!dst_selector,omitempty" validate:"omitempty,selector"`
	NotDstNet      *net.IPNet         `json:"!dst_net,omitempty" validate:"omitempty"`
	NotDstNets     []*net.IPNet       `json:"!dst_nets,omitempty" validate:"omitempty"`
	NotDstPorts    []numorstring.Port `json:"!dst_ports,omitempty" validate:"omitempty,dive"`

	// These fields allow us to pass through the raw match criteria from the V3 datamodel unmodified.
	// The selectors above are formed in the update processor layer by combining the original
	// selectors, namespace selectors and service account selectors into one.
	OriginalSrcSelector               string   `json:"orig_src_selector,omitempty" validate:"omitempty,selector"`
	OriginalSrcNamespaceSelector      string   `json:"orig_src_namespace_selector,omitempty" validate:"omitempty,selector"`
	OriginalDstSelector               string   `json:"orig_dst_selector,omitempty" validate:"omitempty,selector"`
	OriginalDstNamespaceSelector      string   `json:"orig_dst_namespace_selector,omitempty" validate:"omitempty,selector"`
	OriginalNotSrcSelector            string   `json:"!orig_src_selector,omitempty" validate:"omitempty,selector"`
	OriginalNotDstSelector            string   `json:"!orig_dst_selector,omitempty" validate:"omitempty,selector"`
	OriginalSrcServiceAccountNames    []string `json:"orig_src_service_acct_names,omitempty" validate:"omitempty"`
	OriginalSrcServiceAccountSelector string   `json:"orig_src_service_acct_selector,omitempty" validate:"omitempty,selector"`
	OriginalDstServiceAccountNames    []string `json:"orig_dst_service_acct_names,omitempty" validate:"omitempty"`
	OriginalDstServiceAccountSelector string   `json:"orig_dst_service_acct_selector,omitempty" validate:"omitempty,selector"`

	// These fields allow us to pass through application layer selectors from the V3 datamodel.
	HTTPMatch *HTTPMatch `json:"http,omitempty" validate:"omitempty"`

	LogPrefix string `json:"log_prefix,omitempty" validate:"omitempty"`

	Metadata *RuleMetadata `json:"metadata,omitempty" validate:"omitempty"`
}
// HTTPMatch carries application-layer (HTTP) match criteria passed through
// from the v3 datamodel: permitted methods and path matches.
type HTTPMatch struct {
	Methods []string         `json:"methods,omitempty" validate:"omitempty"`
	Paths   []apiv3.HTTPPath `json:"paths,omitempty" validate:"omitempty"`
}

// RuleMetadata carries opaque per-rule metadata (e.g. annotations from the
// v3 rule) through the v1 datamodel.
type RuleMetadata struct {
	Annotations map[string]string `json:"annotations,omitempty"`
}
// combineNets merges the deprecated singular CIDR field n with the plural
// form nets.  When only one of the two is present that input is returned
// (or aliased) directly; when both are present a freshly allocated slice is
// returned so neither input is mutated.
func combineNets(n *net.IPNet, nets []*net.IPNet) []*net.IPNet {
	switch {
	case n == nil:
		return nets
	case len(nets) == 0:
		return []*net.IPNet{n}
	}
	merged := make([]*net.IPNet, 0, len(nets)+1)
	merged = append(merged, nets...)
	return append(merged, n)
}
// AllSrcNets returns the full set of source CIDR matches, merging the
// deprecated singular SrcNet field with SrcNets.
func (r Rule) AllSrcNets() []*net.IPNet {
	return combineNets(r.SrcNet, r.SrcNets)
}

// AllDstNets returns the full set of destination CIDR matches, merging the
// deprecated singular DstNet field with DstNets.
func (r Rule) AllDstNets() []*net.IPNet {
	return combineNets(r.DstNet, r.DstNets)
}

// AllNotSrcNets returns the full set of negated source CIDR matches,
// merging the deprecated singular NotSrcNet field with NotSrcNets.
func (r Rule) AllNotSrcNets() []*net.IPNet {
	return combineNets(r.NotSrcNet, r.NotSrcNets)
}

// AllNotDstNets returns the full set of negated destination CIDR matches,
// merging the deprecated singular NotDstNet field with NotDstNets.
func (r Rule) AllNotDstNets() []*net.IPNet {
	return combineNets(r.NotDstNet, r.NotDstNets)
}
// joinNets renders a list of CIDRs as a single comma-separated string.
func joinNets(nets []*net.IPNet) string {
	joined := ""
	for i, n := range nets {
		if i > 0 {
			joined += ","
		}
		joined += n.String()
	}
	return joined
}
// String renders the rule as a compact, human-readable summary of the form
// "<action> [<protocol/ICMP matches>] [from <source matches>] [to <dest matches>]".
// It is intended for logs and diagnostics; the output is not machine-parsed.
func (r Rule) String() string {
	parts := make([]string, 0)
	// Action.
	if r.Action != "" {
		parts = append(parts, r.Action)
	} else {
		// An unset action defaults to Allow.
		parts = append(parts, "Allow")
	}
	// Global packet attributes that don't depend on direction.
	if r.Protocol != nil {
		parts = append(parts, r.Protocol.String())
	}
	if r.NotProtocol != nil {
		parts = append(parts, "!"+r.NotProtocol.String())
	}
	if r.ICMPType != nil {
		parts = append(parts, "type", strconv.Itoa(*r.ICMPType))
	}
	if r.ICMPCode != nil {
		parts = append(parts, "code", strconv.Itoa(*r.ICMPCode))
	}
	if r.NotICMPType != nil {
		parts = append(parts, "!type", strconv.Itoa(*r.NotICMPType))
	}
	if r.NotICMPCode != nil {
		parts = append(parts, "!code", strconv.Itoa(*r.NotICMPCode))
	}
	{
		// Source attributes. New block ensures that fromParts goes out-of-scope before
		// we calculate toParts. This prevents copy/paste errors.
		fromParts := make([]string, 0)
		if len(r.SrcPorts) > 0 {
			srcPorts := make([]string, len(r.SrcPorts))
			for ii, port := range r.SrcPorts {
				srcPorts[ii] = port.String()
			}
			fromParts = append(fromParts, "ports", strings.Join(srcPorts, ","))
		}
		if r.SrcTag != "" {
			fromParts = append(fromParts, "tag", r.SrcTag)
		}
		if r.SrcSelector != "" {
			// %#v quotes the selector so embedded spaces don't break up the output.
			fromParts = append(fromParts, "selector", fmt.Sprintf("%#v", r.SrcSelector))
		}
		srcNets := r.AllSrcNets()
		if len(srcNets) != 0 {
			fromParts = append(fromParts, "cidr", joinNets(srcNets))
		}
		if len(r.NotSrcPorts) > 0 {
			notSrcPorts := make([]string, len(r.NotSrcPorts))
			for ii, port := range r.NotSrcPorts {
				notSrcPorts[ii] = port.String()
			}
			fromParts = append(fromParts, "!ports", strings.Join(notSrcPorts, ","))
		}
		if r.NotSrcTag != "" {
			fromParts = append(fromParts, "!tag", r.NotSrcTag)
		}
		if r.NotSrcSelector != "" {
			fromParts = append(fromParts, "!selector", fmt.Sprintf("%#v", r.NotSrcSelector))
		}
		notSrcNets := r.AllNotSrcNets()
		if len(notSrcNets) != 0 {
			fromParts = append(fromParts, "!cidr", joinNets(notSrcNets))
		}
		if len(fromParts) > 0 {
			parts = append(parts, "from")
			parts = append(parts, fromParts...)
		}
	}
	{
		// Destination attributes.
		toParts := make([]string, 0)
		if len(r.DstPorts) > 0 {
			DstPorts := make([]string, len(r.DstPorts))
			for ii, port := range r.DstPorts {
				DstPorts[ii] = port.String()
			}
			toParts = append(toParts, "ports", strings.Join(DstPorts, ","))
		}
		if r.DstTag != "" {
			toParts = append(toParts, "tag", r.DstTag)
		}
		if r.DstSelector != "" {
			toParts = append(toParts, "selector", fmt.Sprintf("%#v", r.DstSelector))
		}
		dstNets := r.AllDstNets()
		if len(dstNets) != 0 {
			toParts = append(toParts, "cidr", joinNets(dstNets))
		}
		if len(r.NotDstPorts) > 0 {
			notDstPorts := make([]string, len(r.NotDstPorts))
			for ii, port := range r.NotDstPorts {
				notDstPorts[ii] = port.String()
			}
			toParts = append(toParts, "!ports", strings.Join(notDstPorts, ","))
		}
		if r.NotDstTag != "" {
			toParts = append(toParts, "!tag", r.NotDstTag)
		}
		if r.NotDstSelector != "" {
			toParts = append(toParts, "!selector", fmt.Sprintf("%#v", r.NotDstSelector))
		}
		notDstNets := r.AllNotDstNets()
		if len(notDstNets) != 0 {
			toParts = append(toParts, "!cidr", joinNets(notDstNets))
		}
		// HTTPMatch are destination rules.
		if r.HTTPMatch != nil {
			if len(r.HTTPMatch.Methods) > 0 {
				toParts = append(toParts, "httpMethods", fmt.Sprintf("%+v", r.HTTPMatch.Methods))
			}
			if len(r.HTTPMatch.Paths) > 0 {
				toParts = append(toParts, "httpPaths", fmt.Sprintf("%+v", r.HTTPMatch.Paths))
			}
		}
		if len(toParts) > 0 {
			parts = append(parts, "to")
			parts = append(parts, toParts...)
		}
	}
	return strings.Join(parts, " ")
}

View File

@@ -0,0 +1,184 @@
// Copyright (c) 2016-2018 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"reflect"
"regexp"
"strings"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
)
var (
	matchActiveStatusReport = regexp.MustCompile("^/?calico/felix/v2/([^/]+)/host/([^/]+)/status$")
	// Anchor at end-of-string to mirror matchActiveStatusReport: without the
	// trailing "$", any key whose final segment merely starts with
	// "last_reported_status" (e.g. ".../last_reported_status_old") would
	// also match.
	matchLastStatusReport = regexp.MustCompile("^/?calico/felix/v2/([^/]+)/host/([^/]+)/last_reported_status$")
	typeStatusReport      = reflect.TypeOf(StatusReport{})
)
// ActiveStatusReportKey is the model key for the periodically refreshed
// per-host Felix status report.
type ActiveStatusReportKey struct {
	Hostname string `json:"-" validate:"required,hostname"`
	// RegionString forms a single path segment of the key; it must be
	// non-empty and must not contain "/".
	RegionString string
}

// defaultPath returns the etcd key for this status report; identical to the
// delete path.
func (key ActiveStatusReportKey) defaultPath() (string, error) {
	return key.defaultDeletePath()
}

// defaultDeletePath validates the identifiers and returns the exact etcd key
// used to create, update or delete this status report.
func (key ActiveStatusReportKey) defaultDeletePath() (string, error) {
	if key.Hostname == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "hostname"}
	}
	if key.RegionString == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "regionString"}
	}
	if strings.Contains(key.RegionString, "/") {
		return "", ErrorSlashInRegionString(key.RegionString)
	}
	e := fmt.Sprintf("/calico/felix/v2/%s/host/%s/status", key.RegionString, key.Hostname)
	return e, nil
}

// defaultDeleteParentPaths: there are no parent directories to clean up.
func (key ActiveStatusReportKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the Go type stored at this key (StatusReport).
func (key ActiveStatusReportKey) valueType() (reflect.Type, error) {
	return typeStatusReport, nil
}

func (key ActiveStatusReportKey) String() string {
	return fmt.Sprintf("StatusReport(hostname=%s)", key.Hostname)
}
// ActiveStatusReportListOptions selects active status reports, optionally
// filtered by region and hostname.
type ActiveStatusReportListOptions struct {
	Hostname     string
	RegionString string
}

// defaultPathRoot returns the longest etcd prefix covering all keys matched
// by these options; each unset field stops the narrowing.
func (options ActiveStatusReportListOptions) defaultPathRoot() string {
	k := "/calico/felix/v2/"
	if options.RegionString == "" {
		return k
	}
	k = k + options.RegionString + "/host"
	if options.Hostname == "" {
		return k
	}
	k = k + fmt.Sprintf("/%s/status", options.Hostname)
	return k
}

// KeyFromDefaultPath parses an etcd key back into an ActiveStatusReportKey.
// It returns nil when the key is not an active status report key or when it
// is excluded by the region/hostname filters.
func (options ActiveStatusReportListOptions) KeyFromDefaultPath(ekey string) Key {
	log.Debugf("Get StatusReport key from %s", ekey)
	r := matchActiveStatusReport.FindAllStringSubmatch(ekey, -1)
	if len(r) != 1 {
		log.Debugf("Didn't match regex")
		return nil
	}
	regionString := r[0][1]
	name := r[0][2]
	if options.RegionString != "" && regionString != options.RegionString {
		log.Debugf("Didn't match region %s != %s", options.RegionString, regionString)
		return nil
	}
	if options.Hostname != "" && name != options.Hostname {
		log.Debugf("Didn't match name %s != %s", options.Hostname, name)
		return nil
	}
	return ActiveStatusReportKey{Hostname: name, RegionString: regionString}
}
// LastStatusReportKey is the model key for the last status report that Felix
// wrote before it stopped (kept separately from the active report).
type LastStatusReportKey struct {
	Hostname string `json:"-" validate:"required,hostname"`
	// RegionString forms a single path segment of the key; it must be
	// non-empty and must not contain "/".
	RegionString string
}

// defaultPath returns the etcd key for this report; identical to the delete
// path.
func (key LastStatusReportKey) defaultPath() (string, error) {
	return key.defaultDeletePath()
}

// defaultDeletePath validates the identifiers and returns the exact etcd key
// used to create, update or delete this report.
func (key LastStatusReportKey) defaultDeletePath() (string, error) {
	if key.Hostname == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "hostname"}
	}
	if key.RegionString == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "regionString"}
	}
	if strings.Contains(key.RegionString, "/") {
		return "", ErrorSlashInRegionString(key.RegionString)
	}
	e := fmt.Sprintf("/calico/felix/v2/%s/host/%s/last_reported_status", key.RegionString, key.Hostname)
	return e, nil
}

// defaultDeleteParentPaths: there are no parent directories to clean up.
func (key LastStatusReportKey) defaultDeleteParentPaths() ([]string, error) {
	return nil, nil
}

// valueType returns the Go type stored at this key (StatusReport).
func (key LastStatusReportKey) valueType() (reflect.Type, error) {
	return typeStatusReport, nil
}

func (key LastStatusReportKey) String() string {
	return fmt.Sprintf("StatusReport(hostname=%s)", key.Hostname)
}
// LastStatusReportListOptions selects last-reported status entries,
// optionally filtered by region and hostname.
type LastStatusReportListOptions struct {
	Hostname     string
	RegionString string
}

// defaultPathRoot returns the longest etcd prefix covering all keys matched
// by these options; each unset field stops the narrowing.
func (options LastStatusReportListOptions) defaultPathRoot() string {
	k := "/calico/felix/v2/"
	if options.RegionString == "" {
		return k
	}
	k = k + options.RegionString + "/host"
	if options.Hostname == "" {
		return k
	}
	k = k + fmt.Sprintf("/%s/last_reported_status", options.Hostname)
	return k
}

// KeyFromDefaultPath parses an etcd key back into a LastStatusReportKey.
// It returns nil when the key does not match or is excluded by the
// region/hostname filters.
func (options LastStatusReportListOptions) KeyFromDefaultPath(ekey string) Key {
	log.Debugf("Get StatusReport key from %s", ekey)
	r := matchLastStatusReport.FindAllStringSubmatch(ekey, -1)
	if len(r) != 1 {
		log.Debugf("Didn't match regex")
		return nil
	}
	regionString := r[0][1]
	name := r[0][2]
	if options.RegionString != "" && regionString != options.RegionString {
		log.Debugf("Didn't match region %s != %s", options.RegionString, regionString)
		return nil
	}
	if options.Hostname != "" && name != options.Hostname {
		log.Debugf("Didn't match name %s != %s", options.Hostname, name)
		return nil
	}
	return LastStatusReportKey{Hostname: name, RegionString: regionString}
}
// StatusReport is the JSON payload Felix writes under the status report keys.
type StatusReport struct {
	// Timestamp of the report (string-formatted by the writer).
	Timestamp string `json:"time"`
	// UptimeSeconds is the Felix process uptime in seconds.
	UptimeSeconds float64 `json:"uptime"`
	// FirstUpdate is true for the first report after a (re)start.
	FirstUpdate bool `json:"first_update"`
}

View File

@@ -0,0 +1,185 @@
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"regexp"
"reflect"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/api/pkg/lib/numorstring"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
"github.com/projectcalico/calico/libcalico-go/lib/net"
)
var (
	// matchWorkloadEndpoint parses v1 workload endpoint keys of the form
	// /calico/v1/host/<host>/workload/<orch>/<workload>/endpoint/<endpoint>.
	matchWorkloadEndpoint = regexp.MustCompile("^/?calico/v1/host/([^/]+)/workload/([^/]+)/([^/]+)/endpoint/([^/]+)$")
)
// WorkloadEndpointKey identifies a single v1 workload endpoint by host,
// orchestrator, workload and endpoint ID.
type WorkloadEndpointKey struct {
	Hostname       string `json:"-"`
	OrchestratorID string `json:"-"`
	WorkloadID     string `json:"-"`
	EndpointID     string `json:"-"`
}

// defaultPath validates that all four identifiers are present and returns
// the exact etcd key for this endpoint.  The orchestrator, workload and
// endpoint segments are escaped since they may contain reserved characters.
func (key WorkloadEndpointKey) defaultPath() (string, error) {
	if key.Hostname == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "node"}
	}
	if key.OrchestratorID == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "orchestrator"}
	}
	if key.WorkloadID == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "workload"}
	}
	if key.EndpointID == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
	}
	return fmt.Sprintf("/calico/v1/host/%s/workload/%s/%s/endpoint/%s",
		key.Hostname, escapeName(key.OrchestratorID), escapeName(key.WorkloadID), escapeName(key.EndpointID)), nil
}

// defaultDeletePath is identical to the create/update path.
func (key WorkloadEndpointKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths returns the parent directories (the endpoint dir,
// then the workload dir) that may be cleaned up after this key is deleted.
func (key WorkloadEndpointKey) defaultDeleteParentPaths() ([]string, error) {
	if key.Hostname == "" {
		return nil, errors.ErrorInsufficientIdentifiers{Name: "node"}
	}
	if key.OrchestratorID == "" {
		return nil, errors.ErrorInsufficientIdentifiers{Name: "orchestrator"}
	}
	if key.WorkloadID == "" {
		return nil, errors.ErrorInsufficientIdentifiers{Name: "workload"}
	}
	workload := fmt.Sprintf("/calico/v1/host/%s/workload/%s/%s",
		key.Hostname, escapeName(key.OrchestratorID), escapeName(key.WorkloadID))
	endpoints := workload + "/endpoint"
	return []string{endpoints, workload}, nil
}

// valueType returns the Go type stored at this key (WorkloadEndpoint).
func (key WorkloadEndpointKey) valueType() (reflect.Type, error) {
	return reflect.TypeOf(WorkloadEndpoint{}), nil
}

func (key WorkloadEndpointKey) String() string {
	return fmt.Sprintf("WorkloadEndpoint(node=%s, orchestrator=%s, workload=%s, name=%s)",
		key.Hostname, key.OrchestratorID, key.WorkloadID, key.EndpointID)
}
// WorkloadEndpointListOptions selects workload endpoints, optionally
// filtered by host, orchestrator, workload and endpoint ID.
type WorkloadEndpointListOptions struct {
	Hostname       string
	OrchestratorID string
	WorkloadID     string
	EndpointID     string
}

// defaultPathRoot returns the longest etcd prefix covering all keys matched
// by these options; each unset field stops the narrowing, so later filters
// only take effect when all earlier ones are set.
func (options WorkloadEndpointListOptions) defaultPathRoot() string {
	k := "/calico/v1/host"
	if options.Hostname == "" {
		return k
	}
	k = k + fmt.Sprintf("/%s/workload", options.Hostname)
	if options.OrchestratorID == "" {
		return k
	}
	k = k + fmt.Sprintf("/%s", escapeName(options.OrchestratorID))
	if options.WorkloadID == "" {
		return k
	}
	k = k + fmt.Sprintf("/%s/endpoint", escapeName(options.WorkloadID))
	if options.EndpointID == "" {
		return k
	}
	k = k + fmt.Sprintf("/%s", escapeName(options.EndpointID))
	return k
}
// KeyFromDefaultPath parses an etcd key back into a WorkloadEndpointKey,
// unescaping the orchestrator/workload/endpoint segments.  It returns nil
// when the key is not a workload endpoint key or is excluded by the filters.
func (options WorkloadEndpointListOptions) KeyFromDefaultPath(path string) Key {
	log.Debugf("Get WorkloadEndpoint key from %s", path)
	r := matchWorkloadEndpoint.FindAllStringSubmatch(path, -1)
	if len(r) != 1 {
		log.Debugf("Didn't match regex")
		return nil
	}
	hostname := r[0][1]
	orch := unescapeName(r[0][2])
	workload := unescapeName(r[0][3])
	endpointID := unescapeName(r[0][4])
	if options.Hostname != "" && hostname != options.Hostname {
		log.Debugf("Didn't match hostname %s != %s", options.Hostname, hostname)
		return nil
	}
	if options.OrchestratorID != "" && orch != options.OrchestratorID {
		log.Debugf("Didn't match orchestrator %s != %s", options.OrchestratorID, orch)
		return nil
	}
	if options.WorkloadID != "" && workload != options.WorkloadID {
		log.Debugf("Didn't match workload %s != %s", options.WorkloadID, workload)
		return nil
	}
	if options.EndpointID != "" && endpointID != options.EndpointID {
		log.Debugf("Didn't match endpoint ID %s != %s", options.EndpointID, endpointID)
		return nil
	}
	return WorkloadEndpointKey{
		Hostname:       hostname,
		OrchestratorID: orch,
		WorkloadID:     workload,
		EndpointID:     endpointID,
	}
}
// WorkloadEndpoint is the v1 datamodel representation of a workload's
// network interface: its state, addressing, NAT mappings, labels and ports.
type WorkloadEndpoint struct {
	State            string            `json:"state"`
	Name             string            `json:"name"`
	ActiveInstanceID string            `json:"active_instance_id"`
	Mac              *net.MAC          `json:"mac"`
	ProfileIDs       []string          `json:"profile_ids"`
	IPv4Nets         []net.IPNet       `json:"ipv4_nets"`
	IPv6Nets         []net.IPNet       `json:"ipv6_nets"`
	IPv4NAT          []IPNAT           `json:"ipv4_nat,omitempty"`
	IPv6NAT          []IPNAT           `json:"ipv6_nat,omitempty"`
	Labels           map[string]string `json:"labels,omitempty"`
	IPv4Gateway      *net.IP           `json:"ipv4_gateway,omitempty" validate:"omitempty,ipv4"`
	IPv6Gateway      *net.IP           `json:"ipv6_gateway,omitempty" validate:"omitempty,ipv6"`
	Ports            []EndpointPort    `json:"ports,omitempty" validate:"dive"`
	GenerateName     string            `json:"generate_name,omitempty"`
	// AllowSpoofedSourcePrefixes lists CIDRs from which the endpoint may
	// send traffic with a source address outside its own IP nets.
	AllowSpoofedSourcePrefixes []net.IPNet       `json:"allow_spoofed_source_ips,omitempty"`
	Annotations                map[string]string `json:"annotations,omitempty"`
}
// EndpointPort is a named port exposed by a workload endpoint; the name can
// be referenced from policy rules.
type EndpointPort struct {
	Name     string               `json:"name" validate:"name"`
	Protocol numorstring.Protocol `json:"protocol"`
	Port     uint16               `json:"port" validate:"gt=0"`
}

// IPNat contains a single NAT mapping for a WorkloadEndpoint resource.
type IPNAT struct {
	// The internal IP address which must be associated with the owning endpoint via the
	// configured IPNetworks for the endpoint.
	IntIP net.IP `json:"int_ip" validate:"ip"`
	// The external IP address.
	ExtIP net.IP `json:"ext_ip" validate:"ip"`
}

View File

@@ -0,0 +1,178 @@
// Copyright (c) 2016-2018 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"strings"
"regexp"
"reflect"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/errors"
)
var (
	// matchWorkloadEndpointStatus parses Felix endpoint-status keys of the form
	// /calico/felix/v2/<region>/host/<host>/workload/<orch>/<workload>/endpoint/<endpoint>.
	matchWorkloadEndpointStatus = regexp.MustCompile("^/?calico/felix/v2/([^/]+)/host/([^/]+)/workload/([^/]+)/([^/]+)/endpoint/([^/]+)$")
)
// WorkloadEndpointStatusKey identifies the Felix-reported status entry for a
// single workload endpoint, scoped by region.
type WorkloadEndpointStatusKey struct {
	Hostname       string `json:"-"`
	OrchestratorID string `json:"-"`
	WorkloadID     string `json:"-"`
	EndpointID     string `json:"-"`
	// RegionString forms a single path segment of the key; it must be
	// non-empty and must not contain "/".
	RegionString string
}

// defaultPath validates all identifiers and returns the exact etcd key for
// this status entry.  The orchestrator, workload and endpoint segments are
// escaped since they may contain reserved characters.
func (key WorkloadEndpointStatusKey) defaultPath() (string, error) {
	if key.Hostname == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "hostname"}
	}
	if key.OrchestratorID == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "orchestrator"}
	}
	if key.WorkloadID == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "workload"}
	}
	if key.EndpointID == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "endpoint"}
	}
	if key.RegionString == "" {
		return "", errors.ErrorInsufficientIdentifiers{Name: "regionString"}
	}
	if strings.Contains(key.RegionString, "/") {
		return "", ErrorSlashInRegionString(key.RegionString)
	}
	return fmt.Sprintf("/calico/felix/v2/%s/host/%s/workload/%s/%s/endpoint/%s",
		key.RegionString,
		key.Hostname, escapeName(key.OrchestratorID), escapeName(key.WorkloadID), escapeName(key.EndpointID)), nil
}

// defaultDeletePath is identical to the create/update path.
func (key WorkloadEndpointStatusKey) defaultDeletePath() (string, error) {
	return key.defaultPath()
}

// defaultDeleteParentPaths returns the parent directories (the endpoint dir,
// then the workload dir) that may be cleaned up after this key is deleted.
func (key WorkloadEndpointStatusKey) defaultDeleteParentPaths() ([]string, error) {
	if key.Hostname == "" {
		return nil, errors.ErrorInsufficientIdentifiers{Name: "hostname"}
	}
	if key.OrchestratorID == "" {
		return nil, errors.ErrorInsufficientIdentifiers{Name: "orchestrator"}
	}
	if key.WorkloadID == "" {
		return nil, errors.ErrorInsufficientIdentifiers{Name: "workload"}
	}
	if key.RegionString == "" {
		return nil, errors.ErrorInsufficientIdentifiers{Name: "regionString"}
	}
	if strings.Contains(key.RegionString, "/") {
		return nil, ErrorSlashInRegionString(key.RegionString)
	}
	workload := fmt.Sprintf("/calico/felix/v2/%s/host/%s/workload/%s/%s",
		key.RegionString,
		key.Hostname, escapeName(key.OrchestratorID), escapeName(key.WorkloadID))
	endpoints := workload + "/endpoint"
	return []string{endpoints, workload}, nil
}

// valueType returns the Go type stored at this key (WorkloadEndpointStatus).
func (key WorkloadEndpointStatusKey) valueType() (reflect.Type, error) {
	return reflect.TypeOf(WorkloadEndpointStatus{}), nil
}

func (key WorkloadEndpointStatusKey) String() string {
	return fmt.Sprintf("WorkloadEndpointStatus(hostname=%s, orchestrator=%s, workload=%s, name=%s)",
		key.Hostname, key.OrchestratorID, key.WorkloadID, key.EndpointID)
}
// WorkloadEndpointStatusListOptions selects workload endpoint status
// entries, optionally filtered by region, host, orchestrator, workload and
// endpoint ID.
type WorkloadEndpointStatusListOptions struct {
	Hostname       string
	OrchestratorID string
	WorkloadID     string
	EndpointID     string
	RegionString   string
}

// defaultPathRoot returns the longest etcd prefix covering all keys matched
// by these options; each unset field stops the narrowing.
func (options WorkloadEndpointStatusListOptions) defaultPathRoot() string {
	k := "/calico/felix/v2/"
	if options.RegionString == "" {
		return k
	}
	k = k + options.RegionString + "/host"
	if options.Hostname == "" {
		return k
	}
	k = k + fmt.Sprintf("/%s/workload", options.Hostname)
	if options.OrchestratorID == "" {
		return k
	}
	k = k + fmt.Sprintf("/%s", escapeName(options.OrchestratorID))
	if options.WorkloadID == "" {
		return k
	}
	k = k + fmt.Sprintf("/%s/endpoint", escapeName(options.WorkloadID))
	if options.EndpointID == "" {
		return k
	}
	k = k + fmt.Sprintf("/%s", escapeName(options.EndpointID))
	return k
}
// KeyFromDefaultPath parses an etcd key back into a
// WorkloadEndpointStatusKey, unescaping the orchestrator/workload/endpoint
// segments.  It returns nil when the key does not match or is excluded by
// any of the option filters.
func (options WorkloadEndpointStatusListOptions) KeyFromDefaultPath(ekey string) Key {
	log.Debugf("Get WorkloadEndpoint key from %s", ekey)
	r := matchWorkloadEndpointStatus.FindAllStringSubmatch(ekey, -1)
	if len(r) != 1 {
		log.Debugf("Didn't match regex")
		return nil
	}
	regionString := r[0][1]
	hostname := r[0][2]
	orchID := unescapeName(r[0][3])
	workloadID := unescapeName(r[0][4])
	endpointID := unescapeName(r[0][5])
	if options.RegionString != "" && regionString != options.RegionString {
		log.Debugf("Didn't match region %s != %s", options.RegionString, regionString)
		return nil
	}
	if options.Hostname != "" && hostname != options.Hostname {
		log.Debugf("Didn't match hostname %s != %s", options.Hostname, hostname)
		return nil
	}
	if options.OrchestratorID != "" && orchID != options.OrchestratorID {
		log.Debugf("Didn't match orchestrator %s != %s", options.OrchestratorID, orchID)
		return nil
	}
	if options.WorkloadID != "" && workloadID != options.WorkloadID {
		log.Debugf("Didn't match workload %s != %s", options.WorkloadID, workloadID)
		return nil
	}
	if options.EndpointID != "" && endpointID != options.EndpointID {
		log.Debugf("Didn't match endpoint ID %s != %s", options.EndpointID, endpointID)
		return nil
	}
	return WorkloadEndpointStatusKey{
		Hostname:       hostname,
		OrchestratorID: orchID,
		WorkloadID:     workloadID,
		EndpointID:     endpointID,
		RegionString:   regionString,
	}
}
// WorkloadEndpointStatus is the JSON payload Felix writes under the workload
// endpoint status keys.
type WorkloadEndpointStatus struct {
	Status string `json:"status"`
}

View File

@@ -0,0 +1,22 @@
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package errors implements various error types that are used both internally, and that
may be returned from the client interface.
Errors returned by the client that are not covered by these errors can be considered
as general internal failures.
*/
package errors

View File

@@ -0,0 +1,310 @@
// Copyright (c) 2020 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
import (
"fmt"
"net/http"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Error indicating a problem connecting to the backend.
type ErrorDatastoreError struct {
	Err        error
	Identifier interface{}
}

// Error implements the error interface, delegating to the wrapped error.
func (e ErrorDatastoreError) Error() string {
	return e.Err.Error()
}

// Status returns the Kubernetes API status from the underlying error when it
// carries one; otherwise it wraps the error text in a generic failure Status.
func (e ErrorDatastoreError) Status() metav1.Status {
	if i, ok := e.Err.(apierrors.APIStatus); ok {
		return i.Status()
	}
	// Just wrap in a status error.
	return metav1.Status{
		Status: metav1.StatusFailure,
		Code:   http.StatusBadRequest,
		Reason: metav1.StatusReasonInvalid,
		// Use the message verbatim: passing it through Sprintf as a format
		// string (as before) would mangle any '%' characters in the error
		// text and trips go vet's printf check.
		Message: e.Error(),
		Details: &metav1.StatusDetails{
			Name: fmt.Sprintf("%v", e.Identifier),
		},
	}
}
// Error indicating a resource does not exist. Used when attempting to delete or
// update a non-existent resource.
type ErrorResourceDoesNotExist struct {
	Err        error
	Identifier interface{}
}

// Error implements the error interface.
func (e ErrorResourceDoesNotExist) Error() string {
	return "resource does not exist: " + fmt.Sprint(e.Identifier) + " with error: " + fmt.Sprint(e.Err)
}
// Error indicating an operation is not supported.
type ErrorOperationNotSupported struct {
	Operation  string
	Identifier interface{}
	Reason     string
}

// Error implements the error interface, appending the reason when one is set.
func (e ErrorOperationNotSupported) Error() string {
	msg := fmt.Sprintf("operation %s is not supported on %v", e.Operation, e.Identifier)
	if e.Reason != "" {
		msg += ": " + e.Reason
	}
	return msg
}
// Error indicating a resource already exists. Used when attempting to create a
// resource that already exists.
type ErrorResourceAlreadyExists struct {
	Err        error
	Identifier interface{}
}

// Error implements the error interface.
func (e ErrorResourceAlreadyExists) Error() string {
	return "resource already exists: " + fmt.Sprint(e.Identifier)
}
// Error indicating a problem connecting to the backend.
type ErrorConnectionUnauthorized struct {
	Err error
}

// Error implements the error interface.
func (e ErrorConnectionUnauthorized) Error() string {
	return "connection is unauthorized: " + fmt.Sprint(e.Err)
}
// Validation error containing the fields that are failed validation.
type ErrorValidation struct {
	ErroredFields []ErroredField
}

// ErroredField describes one field that failed validation.
type ErroredField struct {
	Name   string
	Value  interface{}
	Reason string
}

// String renders the field as "name = 'value' (reason)", omitting the value
// and/or reason parts when they are unset.
func (e ErroredField) String() string {
	s := e.Name
	if e.Value != nil {
		s = fmt.Sprintf("%s = '%v'", e.Name, e.Value)
	}
	if e.Reason != "" {
		s = fmt.Sprintf("%s (%s)", s, e.Reason)
	}
	return s
}

// Error summarises the failed fields: a generic message for none, a single
// line for one, and a bulleted list for several.
func (e ErrorValidation) Error() string {
	switch len(e.ErroredFields) {
	case 0:
		return "unknown validation error"
	case 1:
		return fmt.Sprintf("error with field %s", e.ErroredFields[0])
	default:
		s := "error with the following fields:\n"
		for _, f := range e.ErroredFields {
			s += fmt.Sprintf("- %s\n", f)
		}
		return s
	}
}
// Error indicating insufficient identifiers have been supplied on a resource
// management request (create, apply, update, get, delete).
type ErrorInsufficientIdentifiers struct {
	Name string
}

// Error implements the error interface.
func (e ErrorInsufficientIdentifiers) Error() string {
	return "insufficient identifiers, missing '" + e.Name + "'"
}
// Error indicating an atomic update attempt that failed due to a update conflict.
type ErrorResourceUpdateConflict struct {
	Err        error
	Identifier interface{}
}

// Error implements the error interface.
func (e ErrorResourceUpdateConflict) Error() string {
	return "update conflict: " + fmt.Sprint(e.Identifier)
}
// Error indicating that the caller has attempted to release an IP address using
// outdated information.
type ErrorBadHandle struct {
	Requested string // Handle supplied by the caller.
	Expected  string // Handle currently recorded against the allocation.
}

// Error implements the error interface.
func (e ErrorBadHandle) Error() string {
	return fmt.Sprintf("the given handle (%s) does not match (%s) when attempting to release IP",
		e.Requested, e.Expected)
}
// Error indicating that the caller has attempted to release an IP address using
// outdated information.
type ErrorBadSequenceNumber struct {
	Requested uint64 // Sequence number supplied by the caller.
	Expected  uint64 // Sequence number currently recorded against the allocation.
}

// Error implements the error interface.
func (e ErrorBadSequenceNumber) Error() string {
	return fmt.Sprintf("the given sequence number (%d) does not match (%d) when attempting to release IP",
		e.Requested, e.Expected)
}
// Error indicating that the operation may have partially succeeded, then
// failed, without rolling back. A common example is when a function failed
// in an acceptable way after it successfully wrote some data to the datastore.
type ErrorPartialFailure struct {
	Err error
}

// Error implements the error interface.
func (e ErrorPartialFailure) Error() string {
	return "operation partially failed: " + fmt.Sprint(e.Err)
}
// UpdateErrorIdentifier modifies the supplied error to use the new resource
// identifier.  Only the identifier-carrying error types defined in this
// package are rewritten; any other error (or nil) is returned unchanged.
func UpdateErrorIdentifier(err error, id interface{}) error {
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case ErrorDatastoreError:
		typed.Identifier = id
		return typed
	case ErrorResourceDoesNotExist:
		typed.Identifier = id
		return typed
	case ErrorOperationNotSupported:
		typed.Identifier = id
		return typed
	case ErrorResourceAlreadyExists:
		typed.Identifier = id
		return typed
	case ErrorResourceUpdateConflict:
		typed.Identifier = id
		return typed
	}
	return err
}
// Error indicating the datastore has failed to parse an entry.
type ErrorParsingDatastoreEntry struct {
	RawKey   string
	RawValue string
	Err      error
}

// Error implements the error interface, reporting the raw key/value that
// could not be parsed along with the underlying parse error.
func (e ErrorParsingDatastoreEntry) Error() string {
	const layout = "failed to parse datastore entry key=%s; value=%s: %v"
	return fmt.Sprintf(layout, e.RawKey, e.RawValue, e.Err)
}
// ErrorPolicyConversionRule records a single Kubernetes NetworkPolicy rule
// (egress or ingress) that could not be converted, with the reason.
type ErrorPolicyConversionRule struct {
	EgressRule  *networkingv1.NetworkPolicyEgressRule
	IngressRule *networkingv1.NetworkPolicyIngressRule
	Reason      string
}

// String renders the offending rule (or "unknown rule" if neither is set),
// followed by the reason in parentheses when one was recorded.
func (e ErrorPolicyConversionRule) String() string {
	var ruleText string
	if e.EgressRule != nil {
		ruleText = fmt.Sprintf("%+v", e.EgressRule)
	} else if e.IngressRule != nil {
		ruleText = fmt.Sprintf("%+v", e.IngressRule)
	} else {
		ruleText = "unknown rule"
	}
	if e.Reason != "" {
		ruleText = fmt.Sprintf("%s (%s)", ruleText, e.Reason)
	}
	return ruleText
}
// ErrorPolicyConversion accumulates the rules of a Kubernetes NetworkPolicy
// that failed to convert to the Calico representation.
type ErrorPolicyConversion struct {
	PolicyName string
	Rules      []ErrorPolicyConversionRule
}

// BadEgressRule records a failed egress rule conversion along with the reason.
func (e *ErrorPolicyConversion) BadEgressRule(rule *networkingv1.NetworkPolicyEgressRule, reason string) {
	// Copy rule (shallow copy; the stored pointer refers to the copy rather
	// than the caller's rule).
	badRule := *rule
	e.Rules = append(e.Rules, ErrorPolicyConversionRule{
		EgressRule:  &badRule,
		IngressRule: nil,
		Reason:      reason,
	})
}

// BadIngressRule records a failed ingress rule conversion along with the reason.
func (e *ErrorPolicyConversion) BadIngressRule(
	rule *networkingv1.NetworkPolicyIngressRule, reason string) {
	// Copy rule (shallow copy; the stored pointer refers to the copy rather
	// than the caller's rule).
	badRule := *rule
	e.Rules = append(e.Rules, ErrorPolicyConversionRule{
		EgressRule:  nil,
		IngressRule: &badRule,
		Reason:      reason,
	})
}

// Error implements the error interface, summarizing all recorded rule failures.
func (e ErrorPolicyConversion) Error() string {
	s := fmt.Sprintf("policy: %s", e.PolicyName)
	switch {
	case len(e.Rules) == 0:
		s += ": unknown policy conversion error"
	case len(e.Rules) == 1:
		f := e.Rules[0]
		s += fmt.Sprintf(": error with rule %s", f)
	default:
		s += ": error with the following rules:\n"
		for _, f := range e.Rules {
			s += fmt.Sprintf("- %s\n", f)
		}
	}
	return s
}

// GetError returns the accumulated failures as an error value, or nil if no
// rule failures were recorded.
func (e ErrorPolicyConversion) GetError() error {
	if len(e.Rules) == 0 {
		return nil
	}
	return e
}

View File

@@ -0,0 +1,24 @@
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
import log "github.com/sirupsen/logrus"
// PanicIfErrored logs and panics if the supplied error is non-nil.
// It is a no-op for a nil error.
func PanicIfErrored(err error, msgformat string, args ...interface{}) {
	if err == nil {
		return
	}
	log.WithError(err).Panicf(msgformat, args...)
}

View File

@@ -0,0 +1,27 @@
// Copyright (c) 2022 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package json
import jsoniter "github.com/json-iterator/go"
// Marshal is a drop in replacement for encoding/json.Marshal, which uses jsoniter for better performance.
func Marshal(v any) ([]byte, error) {
	return jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(v)
}

// Unmarshal is a drop in replacement for encoding/json.Unmarshal which uses jsoniter for better performance.
func Unmarshal(data []byte, v any) error {
	return jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(data, v)
}

View File

@@ -0,0 +1,37 @@
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package names
import (
"strings"
"github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/net"
)
// CIDRToName converts a CIDR to a valid resource name.
func CIDRToName(cidr net.IPNet) string {
	// Replace the separators that are not allowed in resource names.  The
	// replacement counts cover every separator a valid CIDR string can
	// contain: up to 3 dots (IPv4), up to 7 colons (IPv6) and one slash.
	name := cidr.String()
	name = strings.Replace(name, ".", "-", 3)
	name = strings.Replace(name, ":", "-", 7)
	name = strings.Replace(name, "/", "-", 1)
	logrus.WithFields(logrus.Fields{
		"Name":  name,
		"IPNet": cidr.String(),
	}).Debug("Converted IPNet to resource name")
	return name
}

View File

@@ -0,0 +1,16 @@
package names
import (
"os"
"strings"
)
// Hostname returns this host's hostname, trimming whitespace and converting
// to lowercase so that it is valid for use in the Calico API.
func Hostname() (string, error) {
	// Idiomatic early-return; the original placed the success path in an
	// else branch after a terminating return.
	h, err := os.Hostname()
	if err != nil {
		return "", err
	}
	return strings.ToLower(strings.TrimSpace(h)), nil
}

View File

@@ -0,0 +1,270 @@
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package names
import (
"errors"
"fmt"
"reflect"
"strings"
cerrors "github.com/projectcalico/calico/libcalico-go/lib/errors"
)
// WorkloadEndpointIdentifiers is a collection of identifiers that are used to uniquely
// identify a WorkloadEndpoint resource. Since a resource is identified by a single
// name field, Calico requires the name to be constructed in a very specific format.
// The format is dependent on the Orchestrator type:
// - k8s: <node>-k8s-<pod>-<endpoint>
// - cni: <node>-cni-<containerID>-<endpoint>
// - libnetwork: <node>-libnetwork-libnetwork-<endpoint>
// - (other): <node>-<orchestrator>-<workload>-<endpoint>
//
// Each parameter cannot start or end with a dash (-), and dashes within the parameter
// will be escaped to a double-dash (--) in the constructed name.
//
// List queries allow for prefix lists (for non-KDD), the client should verify that
// the items returned in the list match the supplied identifiers using the
// NameMatches() method. This is necessary because a prefix match may return endpoints
// that do not exactly match the required identifiers. For example, suppose you are
// querying endpoints with node=node1, orch=k8s, pod=pod and endpoints is wild carded:
// - The name prefix would be `node1-k8s-pod-`
// - A list query using that prefix would also return endpoints with, for example,
//   a pod call "pod-1", because the name of the endpoint might be `node1-k8s-pod--1-eth0`
//   which matches the required name prefix.
//
// The Node and Orchestrator are always required for both prefix and non-prefix name
// construction.
type WorkloadEndpointIdentifiers struct {
	Node         string // Name of the hosting node; always required.
	Orchestrator string // Orchestrator ID: "k8s", "cni", "libnetwork" or other.
	Endpoint     string // Endpoint (e.g. interface) identifier.
	Workload     string // Workload ID; used for orchestrators other than k8s/cni/libnetwork.
	Pod          string // Pod name; used when Orchestrator is "k8s".
	ContainerID  string // Container ID; used when Orchestrator is "cni".
}
// NameMatches returns true if the supplied WorkloadEndpoint name matches the
// supplied identifiers.
// This will return an error if the identifiers are not valid.
func (ids WorkloadEndpointIdentifiers) NameMatches(name string) (bool, error) {
	// Determine the ordered name segments for this orchestrator type.
	segs, err := ids.getSegments()
	if err != nil {
		return false, err
	}
	// Split the name into its (unescaped) components; a zero-length result
	// means the name does not have the expected shape.
	values := ExtractDashSeparatedParms(name, len(segs))
	if len(values) == 0 {
		return false, nil
	}
	// Every populated identifier must agree with its corresponding component.
	for idx, seg := range segs {
		if seg.value != "" && seg.value != values[idx] {
			return false, nil
		}
	}
	return true, nil
}
// CalculateWorkloadEndpointName calculates the expected name for a workload
// endpoint given the supplied Spec. Calico requires a precise naming convention
// for workload endpoints that is based on orchestrator and various other orchestrator
// specific parameters.
//
// If allowPrefix is true, we construct the name prefix up to the last specified index
// and terminate with a dash.
func (ids WorkloadEndpointIdentifiers) CalculateWorkloadEndpointName(allowPrefix bool) (string, error) {
	// Determine the ordered name segments for this orchestrator type.
	req, err := ids.getSegments()
	if err != nil {
		return "", err
	}
	parts := []string{}
	for _, s := range req {
		part := ""
		if len(s.value) == 0 {
			// This segment has no value associated with it.
			if !allowPrefix {
				// We are not allowing prefixes. This is an error scenario
				return "", cerrors.ErrorValidation{
					ErroredFields: []cerrors.ErroredField{
						{Name: s.field, Value: s.value, Reason: "field should be assigned"},
					},
				}
			}
			// We are allowing prefixes, so return the prefix that we have constructed thus far,
			// terminating with a "-".
			return strings.Join(parts, "-") + "-", nil
		}
		// Escape any dashes in the segment value; escapeDashes also rejects
		// values that begin or end with a dash.
		part, ef := escapeDashes(s)
		if ef != nil {
			return "", cerrors.ErrorValidation{ErroredFields: []cerrors.ErroredField{*ef}}
		}
		parts = append(parts, part)
	}
	// We have extracted all of the required segments, join the segments with a "-" and
	// return that as the name.
	return strings.Join(parts, "-"), nil
}
// getSegments returns the ID segments specific to the orchestrator.
// The returned slice is always [node, orchestrator, <third>, endpoint] where
// the third segment depends on the orchestrator type.
func (ids WorkloadEndpointIdentifiers) getSegments() ([]segment, error) {
	node := segment{value: ids.Node, field: "node", structField: "Node"}
	orch := segment{value: ids.Orchestrator, field: "orchestrator", structField: "Orchestrator"}
	cont := segment{value: ids.ContainerID, field: "containerID", structField: "ContainerID"}
	pod := segment{value: ids.Pod, field: "pod", structField: "Pod"}
	endp := segment{value: ids.Endpoint, field: "endpoint", structField: "Endpoint"}
	workl := segment{value: ids.Workload, field: "workload", structField: "Workload"}
	// Node is *always* required.
	if len(node.value) == 0 {
		return nil, cerrors.ErrorValidation{
			ErroredFields: []cerrors.ErroredField{
				{Name: node.field, Reason: "field should be assigned"},
			},
		}
	}
	// Extract the segment values based on the orchestrator.  Note that the
	// libnetwork form repeats the orchestrator segment in the workload slot.
	var segments []segment
	switch orch.value {
	case "k8s":
		segments = []segment{node, orch, pod, endp}
	case "cni":
		segments = []segment{node, orch, cont, endp}
	case "libnetwork":
		segments = []segment{node, orch, orch, endp}
	default:
		segments = []segment{node, orch, workl, endp}
	}
	return segments, nil
}
// Segment contains the information of a single name segment. The field names
// are geared towards the struct definition of the corresponding resource.
type segment struct {
	// The value of the field.
	value string
	// The JSON/YAML name of the corresponding field in the WorkloadEndpointSpec
	field string
	// The structure name of the corresponding field in the WorkloadEndpointSpec.
	// NOTE(review): structField is assigned but never read within this file —
	// confirm external need before removing.
	structField string
}
// escapeDashes replaces a single dash with a double dash. This type of escaping is
// used for names constructed by joining a set of names with dashes - it assumes that
// each name segment cannot begin or end in a dash, and returns a validation
// error describing the offending field if it does.
func escapeDashes(seg segment) (string, *cerrors.ErroredField) {
	switch {
	case seg.value[0] == '-':
		return "", &cerrors.ErroredField{Name: seg.field, Value: seg.value, Reason: "field must not begin with a '-'"}
	case seg.value[len(seg.value)-1] == '-':
		return "", &cerrors.ErroredField{Name: seg.field, Value: seg.value, Reason: "field must not end with a '-'"}
	}
	return strings.Replace(seg.value, "-", "--", -1), nil
}
// extractParts splits a constructed name on single dashes, un-escaping any
// double dashes ("--" -> "-") within each segment.  A dash at index 0 is
// never treated as a separator.
func extractParts(name string) []string {
	segments := []string{}
	start := 0
	i := 1
	for i < len(name) {
		if name[i] == '-' {
			if i+1 < len(name) && name[i+1] == '-' {
				// Escaped dash: skip over both characters.
				i += 2
				continue
			}
			// Single dash: this is a separator.
			segments = append(segments, strings.Replace(name[start:i], "--", "-", -1))
			start = i + 1
		}
		i++
	}
	// Add the trailing segment.
	segments = append(segments, strings.Replace(name[start:], "--", "-", -1))
	return segments
}
// ExtractDashSeparatedParms extracts the dash separated parms from the name.
// Each parm will have had their dashes escaped; this also removes that
// escaping.  Returns nil if the parameters could not be extracted.
func ExtractDashSeparatedParms(name string, numParms int) []string {
	// A valid name needs numParms segments of at least one character each,
	// plus numParms-1 separating dashes.
	if len(name) < 2*numParms-1 {
		return nil
	}
	if parts := extractParts(name); len(parts) == numParms {
		return parts
	}
	return nil
}
var (
	// Per-orchestrator WorkloadEndpointIdentifiers struct field names for the
	// third and fourth name segments, used (via reflection) by
	// ParseWorkloadEndpointName.
	k8sFields        = []string{"Pod", "Endpoint"}
	cniFields        = []string{"ContainerID", "Endpoint"}
	libnetworkFields = []string{"Orchestrator", "Endpoint"}
	otherFields      = []string{"Workload", "Endpoint"}
)
// ParseWorkloadEndpointName parses a given name and returns a WorkloadEndpointIdentifiers
// instance with fields populated according to the WorkloadEndpoint name format.
// An error is returned for an empty or unparseable name.
func ParseWorkloadEndpointName(wepName string) (WorkloadEndpointIdentifiers, error) {
	if len(wepName) == 0 {
		return WorkloadEndpointIdentifiers{}, errors.New("Cannot parse empty string")
	}
	parts := extractParts(wepName)
	// S1009: len(nil slice) is 0, so the previous "parts == nil ||" was redundant.
	if len(parts) == 0 {
		return WorkloadEndpointIdentifiers{}, fmt.Errorf("Cannot parse %s", wepName)
	}
	pl := len(parts)
	weid := WorkloadEndpointIdentifiers{Node: parts[0]}
	if pl > 1 {
		weid.Orchestrator = parts[1]
		// Select the struct fields that the remaining segments map onto.
		var orchFlds []string
		switch parts[1] {
		case "k8s":
			orchFlds = k8sFields
		case "cni":
			orchFlds = cniFields
		case "libnetwork":
			orchFlds = libnetworkFields
		default:
			orchFlds = otherFields
		}
		if pl > 2 {
			// Bug fix: a name with more segments than the orchestrator
			// defines (e.g. "n-k8s-a-b-c") previously caused an
			// index-out-of-range panic below; reject such names instead.
			if pl-2 > len(orchFlds) {
				return WorkloadEndpointIdentifiers{}, fmt.Errorf("Cannot parse %s", wepName)
			}
			weidR := reflect.ValueOf(&weid)
			weidStruct := weidR.Elem()
			for i, part := range parts[2:] {
				fld := weidStruct.FieldByName(orchFlds[i])
				fld.SetString(part)
			}
		}
	}
	return weid, nil
}

View File

@@ -0,0 +1,48 @@
// Copyright (c) 2017-2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package namespace
import (
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
libapiv3 "github.com/projectcalico/calico/libcalico-go/lib/apis/v3"
)
const (
	// Re-implement the model constants here
	// to avoid an import loop.  These are special-case kinds that are not
	// exposed over the v3 API but are used internally (e.g. by the felix
	// syncer) — see IsNamespaced below.
	KindKubernetesNetworkPolicy = "KubernetesNetworkPolicy"
	KindKubernetesEndpointSlice = "KubernetesEndpointSlice"
	KindKubernetesService       = "KubernetesService"
)
// IsNamespaced returns true if the given resource kind is namespaced.
func IsNamespaced(kind string) bool {
	switch kind {
	case libapiv3.KindWorkloadEndpoint, apiv3.KindNetworkPolicy, apiv3.KindNetworkSet:
		return true
	case KindKubernetesNetworkPolicy, KindKubernetesEndpointSlice, KindKubernetesService:
		// Special-case resources: these are not exposed over the v3 API, but
		// are used in the felix syncer (e.g. to implement the Kubernetes
		// NetworkPolicy API).
		return true
	default:
		return false
	}
}

View File

@@ -0,0 +1,20 @@
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package net implements a set of net types that are extensions to the built-in
net package. The extensions provide additional function such as JSON marshaling
and unmarshaling.
*/
package net

View File

@@ -0,0 +1,135 @@
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package net
import (
"math/big"
"net"
"github.com/projectcalico/calico/libcalico-go/lib/json"
)
// Sub class net.IP so that we can add JSON marshalling and unmarshalling.
type IP struct {
	net.IP
}

// MarshalJSON serializes the IP as a quoted JSON string.
func (i IP) MarshalJSON() ([]byte, error) {
	text, err := i.MarshalText()
	if err != nil {
		return nil, err
	}
	return json.Marshal(string(text))
}

// UnmarshalJSON parses a quoted JSON string into the IP.
func (i *IP) UnmarshalJSON(b []byte) error {
	var repr string
	if err := json.Unmarshal(b, &repr); err != nil {
		return err
	}
	if err := i.UnmarshalText([]byte(repr)); err != nil {
		return err
	}
	// Always return IPv4 values as 4-bytes to be consistent with IPv4 IPNet
	// representations.
	if v4 := i.To4(); v4 != nil {
		i.IP = v4
	}
	return nil
}

// ParseIP returns an IP from a string, or nil if the string is not a valid
// IP address.
func ParseIP(ip string) *IP {
	parsed := net.ParseIP(ip)
	if parsed == nil {
		return nil
	}
	// Always return IPv4 values as 4-bytes to be consistent with IPv4 IPNet
	// representations.
	if v4 := parsed.To4(); v4 != nil {
		parsed = v4
	}
	return &IP{parsed}
}

// Version returns the IP version for an IP, or 0 if the IP is not valid.
func (i IP) Version() int {
	switch {
	case i.To4() != nil:
		return 4
	case len(i.IP) == net.IPv6len:
		return 6
	default:
		return 0
	}
}
// Network returns the IP address as a fully masked IPNet type.
func (i *IP) Network() *IPNet {
	// Unmarshaling an IPv4 address returns a 16-byte format of the
	// address, so convert to 4-byte format to match the mask.
	result := &IPNet{}
	if v4 := i.IP.To4(); v4 != nil {
		bits := net.IPv4len * 8
		result.IP = v4
		result.Mask = net.CIDRMask(bits, bits)
		return result
	}
	bits := net.IPv6len * 8
	result.IP = i.IP
	result.Mask = net.CIDRMask(bits, bits)
	return result
}

// MustParseIP parses the string into an IP, panicking if it is not valid.
func MustParseIP(i string) IP {
	var parsed IP
	if err := parsed.UnmarshalText([]byte(i)); err != nil {
		panic(err)
	}
	// Always return IPv4 values as 4-bytes to be consistent with IPv4 IPNet
	// representations.
	if v4 := parsed.To4(); v4 != nil {
		parsed.IP = v4
	}
	return parsed
}

// IPToBigInt converts the IP (v4 or v6) to its big-endian integer value.
func IPToBigInt(ip IP) *big.Int {
	raw := ip.To4()
	if raw == nil {
		raw = ip.To16()
	}
	return new(big.Int).SetBytes(raw)
}

// BigIntToIP converts an integer back to an IP.  The v6 flag selects the
// output width; without it we could not tell 0.0.0.0/0 apart from ::/0.
func BigIntToIP(ipInt *big.Int, v6 bool) IP {
	size := net.IPv4len
	if v6 {
		size = net.IPv6len
	}
	raw := make(net.IP, size)
	ipInt.FillBytes(raw)
	return IP{raw}
}

// IncrementIP returns the IP offset from ip by the given (possibly negative)
// increment, preserving the v4/v6 family of the input.
func IncrementIP(ip IP, increment *big.Int) IP {
	isV6 := ip.To4() == nil
	sum := new(big.Int).Add(IPToBigInt(ip), increment)
	return BigIntToIP(sum, isV6)
}

View File

@@ -0,0 +1,166 @@
// Copyright (c) 2016-2017,2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package net
import (
"math/big"
"net"
"github.com/projectcalico/calico/libcalico-go/lib/json"
)
// Sub class net.IPNet so that we can add JSON marshalling and unmarshalling.
type IPNet struct {
	net.IPNet
}

// MarshalJSON interface for an IPNet; emits the CIDR as a quoted JSON string.
func (i IPNet) MarshalJSON() ([]byte, error) {
	return json.Marshal(i.String())
}

// UnmarshalJSON interface for an IPNet.  Accepts either a CIDR or a bare IP
// address (treated as a fully-masked CIDR).
func (i *IPNet) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	// Decode and ensure we maintain the full IP address in the IPNet that we return.
	// (e.g. "10.0.0.1/24" keeps the .1 rather than collapsing to "10.0.0.0/24".)
	ip, ipnet, err := ParseCIDROrIP(s)
	if err != nil {
		return err
	}
	i.IP = ip.IP
	i.Mask = ipnet.Mask
	return nil
}

// Version returns the IP version for an IPNet, or 0 if not a valid IP net.
func (i *IPNet) Version() int {
	if i.IP.To4() != nil {
		return 4
	} else if len(i.IP) == net.IPv6len {
		return 6
	}
	return 0
}

// IsNetOverlap is a utility function that returns true if the two subnet have an overlap.
// Two CIDRs overlap iff either contains the other's base address.
func (i IPNet) IsNetOverlap(n net.IPNet) bool {
	return n.Contains(i.IP) || i.Contains(n.IP)
}

// Covers returns true if the whole of n is covered by this CIDR.
func (i IPNet) Covers(n net.IPNet) bool {
	if !i.Contains(n.IP) {
		return false
	} // else start of n is within our bounds, what about the end...
	// n is fully covered iff our prefix is no more specific than n's.
	nPrefixLen, _ := n.Mask.Size()
	iPrefixLen, _ := i.Mask.Size()
	return iPrefixLen <= nPrefixLen
}

// NthIP returns the IP at offset n from the CIDR's base address.  The offset
// is not range-checked against the size of the CIDR.
func (i IPNet) NthIP(n int) IP {
	bigN := big.NewInt(int64(n))
	return IncrementIP(IP{i.IP}, bigN)
}

// Network returns the masked IP network.
// NOTE(review): the parse error is discarded; for a zero/invalid IPNet this
// returns nil — confirm callers only invoke this on valid CIDRs.
func (i *IPNet) Network() *IPNet {
	_, n, _ := ParseCIDR(i.String())
	return n
}
// ParseCIDR parses the CIDR string and returns the full (unmasked) IP
// together with the masked network, or an error.
func ParseCIDR(c string) (*IP, *IPNet, error) {
	rawIP, rawNet, err := net.ParseCIDR(c)
	if rawNet == nil || err != nil {
		return nil, nil, err
	}
	parsedIP := &IP{rawIP}
	parsedNet := &IPNet{*rawNet}
	// The base golang net library always uses a 4-byte IPv4 address in an
	// IPv4 IPNet, so for uniformity in the returned types, make sure the
	// IP address is also 4-bytes - this allows the user to safely assume
	// all IP addresses returned by this function use the same encoding
	// mechanism (not strictly required but better for testing and debugging).
	if v4 := parsedIP.IP.To4(); v4 != nil {
		parsedIP.IP = v4
	}
	return parsedIP, parsedNet, nil
}

// ParseCIDROrIP parses a CIDR or an IP address and returns the IP, CIDR or error.
// If an IP address string is supplied, then the CIDR returned is the fully
// masked IP address (i.e /32 or /128).
func ParseCIDROrIP(c string) (*IP, *IPNet, error) {
	// First try parsing as a CIDR.
	ip, cidr, cidrErr := ParseCIDR(c)
	if cidrErr == nil {
		return ip, cidr, nil
	}
	// Not a CIDR: try parsing as a bare IP address instead.
	addr := &IP{}
	if addr.UnmarshalText([]byte(c)) == nil {
		// Normalize IPv4 to the 4-byte form.
		if v4 := addr.IP.To4(); v4 != nil {
			addr.IP = v4
		}
		return addr, addr.Network(), nil
	}
	// Neither form parsed; report the original CIDR parse error.
	return nil, nil, cidrErr
}

// String returns a friendly name for the network. The standard net package
// implements String() on the pointer, which means it will not be invoked on a
// struct type, so we re-implement on the struct type.
func (i IPNet) String() string {
	return (&i.IPNet).String()
}

// NumAddrs returns the number of addresses in the CIDR: 2^(bits-prefixlen).
func (i IPNet) NumAddrs() *big.Int {
	ones, bits := i.Mask.Size()
	return new(big.Int).Lsh(big.NewInt(1), uint(bits-ones))
}

// MustParseNetwork parses the string into an IPNet. The IP address in the
// IPNet is masked.  It panics if the string is not a valid CIDR.
func MustParseNetwork(c string) IPNet {
	_, cidr, err := ParseCIDR(c)
	if err != nil {
		panic(err)
	}
	return *cidr
}

// MustParseCIDR parses the string into an IPNet. The IP address in the
// IPNet is not masked.  It panics if the string is not a valid CIDR.
func MustParseCIDR(c string) IPNet {
	ip, cidr, err := ParseCIDR(c)
	if err != nil {
		panic(err)
	}
	unmasked := IPNet{}
	unmasked.IP = ip.IP
	unmasked.Mask = cidr.Mask
	return unmasked
}

View File

@@ -0,0 +1,45 @@
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package net
import (
"net"
"github.com/projectcalico/calico/libcalico-go/lib/json"
)
// Sub class net.HardwareAddr so that we can add JSON marshalling and unmarshalling.
type MAC struct {
	net.HardwareAddr
}

// MarshalJSON serializes the MAC as a quoted JSON string in its standard
// colon-separated form.
func (m MAC) MarshalJSON() ([]byte, error) {
	return json.Marshal(m.String())
}

// UnmarshalJSON parses a quoted JSON string in any format accepted by
// net.ParseMAC.
func (m *MAC) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	// Idiomatic early-return form; the success path previously sat in an
	// else branch after a terminating return.
	mac, err := net.ParseMAC(s)
	if err != nil {
		return err
	}
	m.HardwareAddr = mac
	return nil
}

View File

@@ -0,0 +1,166 @@
// Copyright (c) 2016-2022 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package set
import (
"bytes"
"fmt"
log "github.com/sirupsen/logrus"
)
// NewBoxed creates a new "boxed" Set, where the items stored in the set are boxed inside an interface. The values
// placed into the set must be comparable (i.e. suitable for use as a map key). This is checked at runtime and the
// code will panic on trying to add a non-comparable entry.
//
// This implementation exists because Go's generics currently have a gap. The type set of the "comparable"
// constraint currently doesn't include interface types, which under Go's normal rules _are_ comparable (but may
// panic at runtime if the interface happens to contain a non-comparable object). If possible use a typed map
// via New() or From(); use this if you really need a Set[any] or Set[SomeInterface].
func NewBoxed[T any]() Boxed[T] {
	return make(Boxed[T])
}

// FromBoxed creates a boxed set containing the supplied members.
func FromBoxed[T any](members ...T) Boxed[T] {
	s := NewBoxed[T]()
	s.AddAll(members)
	return s
}

// FromArrayBoxed creates a boxed set containing the elements of the supplied slice.
func FromArrayBoxed[T any](membersArray []T) Boxed[T] {
	s := NewBoxed[T]()
	s.AddAll(membersArray)
	return s
}

// Empty returns a shared always-empty Set.
// NOTE(review): the returned set is backed by a nil map, so calling
// Add/AddAll/AddSet on it will panic; it is read-only by construction.
func Empty[T any]() Set[T] {
	return (Boxed[T])(nil)
}
// Boxed is a Set implementation that stores its items boxed in an "any" map
// key; see NewBoxed for when to use it instead of Typed.
type Boxed[T any] map[any]v

// String returns a human-readable representation, e.g. "set.Set{a,b,c}".
// Element order is arbitrary (map iteration order).
func (set Boxed[T]) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("set.Set{")
	first := true
	set.Iter(func(item T) error {
		if !first {
			buf.WriteString(",")
		} else {
			first = false
		}
		_, _ = fmt.Fprint(&buf, item)
		return nil
	})
	_, _ = buf.WriteString("}")
	return buf.String()
}
// Len returns the number of items in the set.
func (set Boxed[T]) Len() int {
	return len(set)
}

// Add inserts the item into the set; the item must be runtime-comparable
// (see NewBoxed) or this panics.
func (set Boxed[T]) Add(item T) {
	set[item] = emptyValue
}

// AddAll inserts every element of the supplied slice.
func (set Boxed[T]) AddAll(itemArray []T) {
	for _, member := range itemArray {
		set.Add(member)
	}
}

// AddSet adds the contents of set "other" into the set.
func (set Boxed[T]) AddSet(other Set[T]) {
	other.Iter(func(member T) error {
		set.Add(member)
		return nil
	})
}

// Discard removes the item if present; it is a no-op otherwise.
func (set Boxed[T]) Discard(item T) {
	delete(set, item)
}

// Clear removes every item from the set.
func (set Boxed[T]) Clear() {
	for member := range set {
		delete(set, member)
	}
}

// Contains reports whether the item is in the set.
func (set Boxed[T]) Contains(item T) bool {
	_, found := set[item]
	return found
}

// Iter calls visitor once per member.  The visitor may return StopIteration
// to end the iteration early, or RemoveItem to delete the current member;
// any other non-nil error panics.
func (set Boxed[T]) Iter(visitor func(item T) error) {
	for boxed := range set {
		member := boxed.(T)
		switch err := visitor(member); err {
		case nil:
			// Carry on with the next member.
		case StopIteration:
			return
		case RemoveItem:
			delete(set, member)
		default:
			log.WithError(err).Panic("Unexpected iteration error")
		}
	}
}

// Copy returns a new boxed set containing the same members.
func (set Boxed[T]) Copy() Set[T] {
	dupe := NewBoxed[T]()
	for boxed := range set {
		dupe.Add(boxed.(T))
	}
	return dupe
}

// Slice returns the members as a slice, in arbitrary order.
func (set Boxed[T]) Slice() (s []T) {
	for boxed := range set {
		s = append(s, boxed.(T))
	}
	return
}

// Equals reports whether the two sets contain exactly the same members.
func (set Boxed[T]) Equals(other Set[T]) bool {
	if set.Len() != other.Len() {
		return false
	}
	for boxed := range set {
		if !other.Contains(boxed.(T)) {
			return false
		}
	}
	return true
}

// ContainsAll reports whether every member of "other" is also in this set.
func (set Boxed[T]) ContainsAll(other Set[T]) bool {
	missing := false
	other.Iter(func(member T) error {
		if set.Contains(member) {
			return nil
		}
		missing = true
		return StopIteration
	})
	return !missing
}

View File

@@ -0,0 +1,42 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package set
import (
"errors"
"fmt"
)
// Set is a collection of unique items of type T, implemented in this package
// by Typed (comparable element types) and Boxed (interface/any element types).
type Set[T any] interface {
	Len() int                // Number of items in the set.
	Add(T)                   // Add a single item.
	AddAll(itemArray []T)    // Add every element of the slice.
	AddSet(other Set[T])     // Union-in the contents of another set.
	Discard(T)               // Remove an item (no-op if absent).
	Clear()                  // Remove all items.
	Contains(T) bool         // Membership test.
	Iter(func(item T) error) // Visit each item; see StopIteration/RemoveItem.
	Copy() Set[T]            // Copy of the set.
	Equals(Set[T]) bool      // True if both sets hold exactly the same items.
	ContainsAll(Set[T]) bool // True if every item of the argument is present.
	Slice() []T              // Items as a slice, in arbitrary order.
	fmt.Stringer
}

var (
	// StopIteration, returned from an Iter visitor, ends the iteration early.
	StopIteration = errors.New("stop iteration")
	// RemoveItem, returned from an Iter visitor, deletes the current item and continues.
	RemoveItem = errors.New("remove item")
)

// v is the zero-size value type used by the map-based set implementations.
type v struct{}

var emptyValue = v{}

View File

@@ -0,0 +1,150 @@
// Copyright (c) 2016-2022 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package set
import (
"bytes"
"fmt"
log "github.com/sirupsen/logrus"
)
// New creates a new, empty Typed set.
func New[T comparable]() Typed[T] {
	return make(Typed[T])
}

// From creates a Typed set containing the supplied members.
func From[T comparable](members ...T) Typed[T] {
	s := New[T]()
	s.AddAll(members)
	return s
}

// FromArray creates a Typed set containing the elements of the supplied slice.
func FromArray[T comparable](membersArray []T) Typed[T] {
	s := New[T]()
	s.AddAll(membersArray)
	return s
}

// Typed is the preferred Set implementation; items of comparable type T are
// stored as keys of a map with a zero-size value type.
type Typed[T comparable] map[T]v

// String returns a human-readable representation, e.g. "set.Set{a,b,c}".
// Element order is arbitrary (map iteration order).
func (set Typed[T]) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("set.Set{")
	first := true
	set.Iter(func(item T) error {
		if !first {
			buf.WriteString(",")
		} else {
			first = false
		}
		_, _ = fmt.Fprint(&buf, item)
		return nil
	})
	_, _ = buf.WriteString("}")
	return buf.String()
}
// Len returns the number of items in the set.
func (set Typed[T]) Len() int {
	return len(set)
}

// Add inserts the item into the set.
func (set Typed[T]) Add(item T) {
	set[item] = emptyValue
}

// AddAll inserts every element of the supplied slice.
func (set Typed[T]) AddAll(itemArray []T) {
	for _, member := range itemArray {
		set.Add(member)
	}
}

// AddSet adds the contents of set "other" into the set.
func (set Typed[T]) AddSet(other Set[T]) {
	other.Iter(func(member T) error {
		set.Add(member)
		return nil
	})
}

// Discard removes the item if present; it is a no-op otherwise.
func (set Typed[T]) Discard(item T) {
	delete(set, item)
}

// Clear removes every item from the set.
func (set Typed[T]) Clear() {
	for member := range set {
		delete(set, member)
	}
}

// Contains reports whether the item is in the set.
func (set Typed[T]) Contains(item T) bool {
	_, found := set[item]
	return found
}

// Iter calls visitor once per member.  The visitor may return StopIteration
// to end the iteration early, or RemoveItem to delete the current member;
// any other non-nil error panics.
func (set Typed[T]) Iter(visitor func(item T) error) {
	for member := range set {
		switch err := visitor(member); err {
		case nil:
			// Carry on with the next member.
		case StopIteration:
			return
		case RemoveItem:
			delete(set, member)
		default:
			log.WithError(err).Panic("Unexpected iteration error")
		}
	}
}

// Copy returns a new Typed set containing the same members.
func (set Typed[T]) Copy() Set[T] {
	dupe := New[T]()
	for member := range set {
		dupe.Add(member)
	}
	return dupe
}

// Slice returns the members as a slice, in arbitrary order.
func (set Typed[T]) Slice() (s []T) {
	for member := range set {
		s = append(s, member)
	}
	return
}

// Equals reports whether the two sets contain exactly the same members.
func (set Typed[T]) Equals(other Set[T]) bool {
	if set.Len() != other.Len() {
		return false
	}
	for member := range set {
		if !other.Contains(member) {
			return false
		}
	}
	return true
}

// ContainsAll reports whether every member of "other" is also in this set.
func (set Typed[T]) ContainsAll(other Set[T]) bool {
	missing := false
	other.Iter(func(member T) error {
		if set.Contains(member) {
			return nil
		}
		missing = true
		return StopIteration
	})
	return !missing
}