commit 75430de2e0033f6a4d4d7830a92ad3c52bdc615d Author: Markos Gogoulos Date: Tue Dec 15 23:33:43 2020 +0200 MediaCMS backend, initial commit diff --git a/AUTHORS.txt b/AUTHORS.txt new file mode 100644 index 0000000..b76761c --- /dev/null +++ b/AUTHORS.txt @@ -0,0 +1,4 @@ +Wordgames.gr - https://www.wordgames.gr +Yiannis Stergiou - ys.stergiou@gmail.com +Markos Gogoulos - mgogoulos@gmail.com + diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..de81495 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,661 @@ +GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + +Copyright (C) 2007 Free Software Foundation, Inc. +Everyone is permitted to copy and distribute verbatim copies +of this license document, but changing it is not allowed. + + Preamble + +The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + +The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + +When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + +Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + +A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + +The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + +An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + +The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + +0. Definitions. + +"This License" refers to version 3 of the GNU Affero General Public License. 
+ +"Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + +"The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + +To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + +A "covered work" means either the unmodified Program or a work based +on the Program. + +To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + +To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + +An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + +1. Source Code. + +The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + +A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + +The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + +The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + +The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + +The Corresponding Source for a work in source code form is that +same work. + +2. Basic Permissions. + +All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + +Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + +3. Protecting Users' Legal Rights From Anti-Circumvention Law. + +No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + +When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + +4. Conveying Verbatim Copies. + +You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + +You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + +5. Conveying Modified Source Versions. 
+ +You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + +a) The work must carry prominent notices stating that you modified +it, and giving a relevant date. + +b) The work must carry prominent notices stating that it is +released under this License and any conditions added under section +7. This requirement modifies the requirement in section 4 to +"keep intact all notices". + +c) You must license the entire work, as a whole, under this +License to anyone who comes into possession of a copy. This +License will therefore apply, along with any applicable section 7 +additional terms, to the whole of the work, and all its parts, +regardless of how they are packaged. This License gives no +permission to license the work in any other way, but it does not +invalidate such permission if you have separately received it. + +d) If the work has interactive user interfaces, each must display +Appropriate Legal Notices; however, if the Program has interactive +interfaces that do not display Appropriate Legal Notices, your +work need not make them do so. + +A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + +6. Conveying Non-Source Forms. + +You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + +a) Convey the object code in, or embodied in, a physical product +(including a physical distribution medium), accompanied by the +Corresponding Source fixed on a durable physical medium +customarily used for software interchange. + +b) Convey the object code in, or embodied in, a physical product +(including a physical distribution medium), accompanied by a +written offer, valid for at least three years and valid for as +long as you offer spare parts or customer support for that product +model, to give anyone who possesses the object code either (1) a +copy of the Corresponding Source for all the software in the +product that is covered by this License, on a durable physical +medium customarily used for software interchange, for a price no +more than your reasonable cost of physically performing this +conveying of source, or (2) access to copy the +Corresponding Source from a network server at no charge. + +c) Convey individual copies of the object code with a copy of the +written offer to provide the Corresponding Source. This +alternative is allowed only occasionally and noncommercially, and +only if you received the object code with such an offer, in accord +with subsection 6b. + +d) Convey the object code by offering access from a designated +place (gratis or for a charge), and offer equivalent access to the +Corresponding Source in the same way through the same place at no +further charge. 
You need not require recipients to copy the +Corresponding Source along with the object code. If the place to +copy the object code is a network server, the Corresponding Source +may be on a different server (operated by you or a third party) +that supports equivalent copying facilities, provided you maintain +clear directions next to the object code saying where to find the +Corresponding Source. Regardless of what server hosts the +Corresponding Source, you remain obligated to ensure that it is +available for as long as needed to satisfy these requirements. + +e) Convey the object code using peer-to-peer transmission, provided +you inform other peers where the object code and Corresponding +Source of the work are being offered to the general public at no +charge under subsection 6d. + +A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + +A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + +"Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + +If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + +The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ +Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + +7. Additional Terms. + +"Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + +Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + +a) Disclaiming warranty or limiting liability differently from the +terms of sections 15 and 16 of this License; or + +b) Requiring preservation of specified reasonable legal notices or +author attributions in that material or in the Appropriate Legal +Notices displayed by works containing it; or + +c) Prohibiting misrepresentation of the origin of that material, or +requiring that modified versions of such material be marked in +reasonable ways as different from the original version; or + +d) Limiting the use for publicity purposes of names of licensors or +authors of the material; or + +e) Declining to grant rights under trademark law for use of some +trade names, trademarks, or service marks; or + +f) Requiring indemnification of licensors and authors of that +material by anyone who conveys the material (or modified versions of +it) with contractual assumptions of liability to the recipient, for +any liability that these contractual assumptions directly impose on +those licensors and authors. + +All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + +If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + +8. Termination. 
+ +You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + +However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + +Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + +Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + +9. Acceptance Not Required for Having Copies. + +You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + +10. Automatic Licensing of Downstream Recipients. + +Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + +An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + +11. Patents. + +A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. 
The +work thus licensed is called the contributor's "contributor version". + +A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + +In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + +If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + +A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + +12. No Surrender of Others' Freedom. + +If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + +13. Remote Network Interaction; Use with the GNU General Public License. + +Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + +Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + +14. Revised Versions of this License. + +The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. 
If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + +If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + +Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + +15. Disclaimer of Warranty. + +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +16. Limitation of Liability. + +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + +17. Interpretation of Sections 15 and 16. + +If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + +How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + +MediaCMS: Modern, fully featured open source video and media CMS +Copyright (C) 2020 Markos Gogoulos and Yiannis Stergiou + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published +by the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..1286c24
--- /dev/null
+++ b/README.md
@@ -0,0 +1,147 @@
+![MediaCMS](static/images/logo_dark.png)
+
+MediaCMS is a modern, fully featured open source video and media CMS. It is developed to meet the needs of modern web platforms for viewing and sharing media. It can be used to build a small to medium video and media portal within minutes.
+
+It is built mostly using a modern stack of Django + React and includes a REST API.
+
+## Screenshots
+
+![MediaCMS](docs/images/index.jpg)
+
+Vanilla MediaCMS index page
+
+![MediaCMS](docs/images/video.jpg)
+
+Video page with different player options
+
+![MediaCMS](docs/images/embed.jpg)
+
+Embed video page
+
+
+## Features
+- **Complete control over your data**: host it yourself!
+- **Support for multiple publishing workflows**: public, private, unlisted and custom
+- **Modern technologies**: Django/Python/Celery, React.
+- **Multiple media types support**: video, audio, image, pdf
+- **Multiple media classification options**: categories, tags and custom
+- **Multiple media sharing options**: social media share, videos embed code generation
+- **Easy media searching**: enriched with live search functionality
+- **Responsive design**: including light and dark themes
+- **Advanced user management**: allow self registration, invite only, closed.
+- **Configurable actions**: allow download, add comments, add likes, dislikes, report media
+- **Configuration options**: change logos, fonts, styling, add more pages
+- **Enhanced video player**: customized video.js player with multiple resolution and playback speed options
+- **Multiple transcoding profiles**: sane defaults for multiple dimensions (240p, 360p, 480p, 720p, 1080p) and multiple profiles (h264, h265, vp9)
+- **Adaptive video streaming**: possible through the HLS protocol
+- **Subtitles/CC**: support for multilingual subtitle files
+- **Scalable transcoding**: transcoding through priorities. Experimental support for remote workers
+- **Chunked file uploads**: for pausable/resumable upload of content
+
+
+## Example cases
+
+- **Schools, education.** Administrators and editors control what content gets published; students are not distracted by advertisements and irrelevant content, and they can choose whether to stream or download content.
+
+- **Organization-sensitive content.** For cases where content is sensitive and cannot be uploaded to external sites.
+
+- **Build a great community.** MediaCMS can be customized (URLs, logos, fonts, aesthetics) so that you can create a highly customized video portal for your community!
+
+- **Personal portal.** Organize, categorize and host your content the way you prefer.
+
+
+## Philosophy
+
+We believe there's a need for quality open source web applications that can be used to build community portals and support collaboration.
+
+We have three goals for MediaCMS: a) deliver all the functionality one would expect from a modern system, b) allow for easy installation and maintenance, c) allow easy customization and addition of features.
+
+
+## License
+
+MediaCMS is released under the [GNU Affero General Public License v3.0](LICENSE.txt).
+Copyright Markos Gogoulos and Yiannis Stergiou
+
+
+## Support and paid services
+
+We provide custom installations, development of extra functionality, migration from existing systems, integrations with legacy systems, training and support. Contact us at info@mediacms.io for more information.
+
+
+
+## Hardware dependencies
+
+For a small to medium installation, with a few hours of video uploaded daily and a few hundred active daily users viewing content, a minimum of 4GB RAM and 2-4 CPUs is enough. For a larger installation with many hours of video uploaded daily, consider adding more CPUs and more RAM.
+
+In terms of disk space, estimate what your needs will be. A general rule is to multiply the expected size of uploaded videos by three (since the system keeps the original version, the encoded versions, plus HLS), so if you receive 1GB of videos daily and keep all of them, you should plan for roughly a 1TB disk per year (1GB * 3 * 365 ≈ 1.1TB).
+
+
+## Install
+
+The core dependencies are Python3, Django3, Celery, PostgreSQL, Redis and ffmpeg. Any system that can run these dependencies can run MediaCMS, but we strongly suggest installing on Ubuntu 18 or 20.
+
+Installation on an Ubuntu 18 or 20 system with the git utility installed should be completed in a few minutes with the following steps.
+Make sure you run it as the root user, on a clean system, since the automated script will install and configure the following services: Celery/PostgreSQL/Redis/Nginx, and will override any existing settings.
+
+Automated script - to run on Ubuntu 18 or Ubuntu 20 flavors only!
+
+```bash
+mkdir /home/mediacms.io && cd /home/mediacms.io/
+git clone https://github.com/mediacms-io/mediacms
+cd /home/mediacms.io/mediacms/ && bash ./install.sh
+```
+
+The script will ask if you have a URL where you want to deploy MediaCMS, otherwise it will use localhost. If you provide a URL, it will use the Let's Encrypt service to install a valid SSL certificate.
+
+
+## Configure
+
+Several options are available in cms/settings.py; most of the things that can be allowed or disallowed are described there. It is advisable to override any of them by adding them to cms/local_settings.py. All configuration options will be documented gradually on the [Configuration](docs/Configuration.md) page.
+
+## Authors
+MediaCMS is developed by Yiannis Stergiou and Markos Gogoulos.
We are Wordgames - https://wordgames.gr + + +## Technology +This software uses the following list of awesome technologies: +- Python +- Django +- Django Rest Framework +- Celery +- PostgreSQL +- Redis +- Nginx +- uWSGI +- React +- Fine Uploader +- video.js +- FFMPEG +- Bento4 + + +## Who is using it + +- **EngageMedia** non-profit media, technology and culture organization - https://video.engagemedia.org + +- **Critical Commons** public media archive and fair use advocacy network - https://criticalcommons.org + +- **Heritales** International Heritage Film Festival - https://stage.heritales.org + + +## Thanks To + +- **Anna Helme**, for such a great partnership all these years! + +- **Steve Anderson**, for trusting us and helping the Wordgames team make this real. + +- **Andrew Lowenthal, King Catoy, Rezwan Islam** and the rest of the great team of [Engage Media](https://engagemedia.org). + +- **Ioannis Korovesis, Ioannis Maistros, Diomidis Spinellis and Theodoros Karounos**, for their mentorship all these years, their contribution to science and the promotion of open source and free software technologies. + +- **Antonis Ikonomou**, for hosting us on the excellent [Innovathens](https://www.innovathens.gr) space. + +- **Werner Robitza**, for helping us with ffmpeg related stuff. + + +## Contact +info@mediacms.io diff --git a/actions/__init__.py b/actions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/actions/admin.py b/actions/admin.py new file mode 100644 index 0000000..e69de29 diff --git a/actions/apps.py b/actions/apps.py new file mode 100644 index 0000000..ce61600 --- /dev/null +++ b/actions/apps.py @@ -0,0 +1,5 @@ +from django.apps import AppConfig + + +class ActionsConfig(AppConfig): + name = "actions" diff --git a/actions/migrations/0001_initial.py b/actions/migrations/0001_initial.py new file mode 100644 index 0000000..4f6a65d --- /dev/null +++ b/actions/migrations/0001_initial.py @@ -0,0 +1,54 @@ +# Generated by Django 3.1.4 on 2020-12-01 07:12 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [] + + operations = [ + migrations.CreateModel( + name="MediaAction", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "session_key", + models.CharField( + blank=True, + db_index=True, + help_text="for not logged in users", + max_length=33, + null=True, + ), + ), + ( + "action", + models.CharField( + choices=[ + ("like", "Like"), + ("dislike", "Dislike"), + ("watch", "Watch"), + ("report", "Report"), + ("rate", "Rate"), + ], + default="watch", + max_length=20, + ), + ), + ("extra_info", models.TextField(blank=True, null=True)), + ("action_date", models.DateTimeField(auto_now_add=True)), + ("remote_ip", models.CharField(blank=True, max_length=40, null=True)), + ], + ), + ] diff --git a/actions/migrations/0002_mediaaction_media.py b/actions/migrations/0002_mediaaction_media.py new file mode 100644 index 0000000..0a0372a --- /dev/null +++ b/actions/migrations/0002_mediaaction_media.py @@ -0,0 +1,26 @@ +# Generated by Django 3.1.4 on 2020-12-01 07:12 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ("actions", "0001_initial"), + ("files", "0001_initial"), + ] + + operations = [ + migrations.AddField( + model_name="mediaaction", + name="media", + field=models.ForeignKey( + 
on_delete=django.db.models.deletion.CASCADE, + related_name="mediaactions", + to="files.media", + ), + ), + ] diff --git a/actions/migrations/0003_auto_20201201_0712.py b/actions/migrations/0003_auto_20201201_0712.py new file mode 100644 index 0000000..80178ef --- /dev/null +++ b/actions/migrations/0003_auto_20201201_0712.py @@ -0,0 +1,42 @@ +# Generated by Django 3.1.4 on 2020-12-01 07:12 + +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ("actions", "0002_mediaaction_media"), + ] + + operations = [ + migrations.AddField( + model_name="mediaaction", + name="user", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="useractions", + to=settings.AUTH_USER_MODEL, + ), + ), + migrations.AddIndex( + model_name="mediaaction", + index=models.Index( + fields=["user", "action", "-action_date"], + name="actions_med_user_id_940054_idx", + ), + ), + migrations.AddIndex( + model_name="mediaaction", + index=models.Index( + fields=["session_key", "action"], name="actions_med_session_fac55a_idx" + ), + ), + ] diff --git a/actions/migrations/__init__.py b/actions/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/actions/models.py b/actions/models.py new file mode 100644 index 0000000..64a6d4d --- /dev/null +++ b/actions/models.py @@ -0,0 +1,55 @@ +from django.db import models +from users.models import User +from files.models import Media + +USER_MEDIA_ACTIONS = ( + ("like", "Like"), + ("dislike", "Dislike"), + ("watch", "Watch"), + ("report", "Report"), + ("rate", "Rate"), +) + + +class MediaAction(models.Model): + """Stores different user actions""" + + user = models.ForeignKey( + User, + on_delete=models.CASCADE, + db_index=True, + blank=True, + null=True, + related_name="useractions", + ) + session_key = models.CharField( + max_length=33, + db_index=True, + blank=True, + null=True, + help_text="for not logged in users", + ) + + action = models.CharField( + max_length=20, choices=USER_MEDIA_ACTIONS, default="watch" + ) + # keeps extra info, eg on report action, why it is reported + extra_info = models.TextField(blank=True, null=True) + + media = models.ForeignKey( + Media, on_delete=models.CASCADE, related_name="mediaactions" + ) + action_date = models.DateTimeField(auto_now_add=True) + remote_ip = models.CharField(max_length=40, blank=True, null=True) + + def save(self, *args, **kwargs): + super(MediaAction, self).save(*args, **kwargs) + + def __str__(self): + return self.action + + class Meta: + indexes = [ + models.Index(fields=["user", "action", "-action_date"]), + models.Index(fields=["session_key", "action"]), + ] diff --git a/actions/tests.py b/actions/tests.py new file mode 100644 index 0000000..e69de29 diff --git a/actions/views.py b/actions/views.py new file mode 100644 index 0000000..e69de29 diff --git a/cms/__init__.py b/cms/__init__.py new file mode 100644 index 0000000..1afbeb5 --- /dev/null +++ b/cms/__init__.py @@ -0,0 +1,4 @@ +from __future__ import absolute_import +from .celery import app as celery_app + +__all__ = ["celery_app"] diff --git a/cms/celery.py b/cms/celery.py new file mode 100644 index 0000000..8b21f5b --- /dev/null +++ b/cms/celery.py @@ -0,0 +1,16 @@ +from __future__ import absolute_import +import os +from celery import Celery + 
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cms.settings") +app = Celery("cms") + +app.config_from_object("django.conf:settings") +app.autodiscover_tasks() + +app.conf.beat_schedule = app.conf.CELERY_BEAT_SCHEDULE +app.conf.broker_transport_options = {"visibility_timeout": 60 * 60 * 24} # 1 day +# http://docs.celeryproject.org/en/latest/getting-started/brokers/redis.html#redis-caveats + + +app.conf.worker_prefetch_multiplier = 1 diff --git a/cms/custom_pagination.py b/cms/custom_pagination.py new file mode 100644 index 0000000..170ed61 --- /dev/null +++ b/cms/custom_pagination.py @@ -0,0 +1,29 @@ +from rest_framework.pagination import PageNumberPagination +from rest_framework.response import Response +from collections import OrderedDict # requires Python 2.7 or later +from django.core.paginator import Paginator +from django.utils.functional import cached_property + + +class FasterDjangoPaginator(Paginator): + @cached_property + def count(self): + return 50 + + +class FastPaginationWithoutCount(PageNumberPagination): + """Experimental, for cases where a SELECT COUNT is redundant""" + + django_paginator_class = FasterDjangoPaginator + + def get_paginated_response(self, data): + + return Response( + OrderedDict( + [ + ("next", self.get_next_link()), + ("previous", self.get_previous_link()), + ("results", data), + ] + ) + ) diff --git a/cms/permissions.py b/cms/permissions.py new file mode 100644 index 0000000..f456dbe --- /dev/null +++ b/cms/permissions.py @@ -0,0 +1,64 @@ +from django.conf import settings +from rest_framework import permissions +from files.methods import is_mediacms_editor, is_mediacms_manager + + +class IsAuthorizedToAdd(permissions.BasePermission): + def has_permission(self, request, view): + if request.method in permissions.SAFE_METHODS: + return True + return user_allowed_to_upload(request) + + +class IsUserOrManager(permissions.BasePermission): + """To be used in cases where request.user is either the + object owner, or anyone amongst MediaCMS managers + or superusers + """ + + def has_object_permission(self, request, view, obj): + if request.method in permissions.SAFE_METHODS: + return True + if request.user.is_superuser: + return True + if is_mediacms_manager(request.user): + return True + + return obj == request.user + + +class IsUserOrEditor(permissions.BasePermission): + """To be used in cases where request.user is either the + object owner, or anyone amongst MediaCMS editors, managers + or superusers + """ + + def has_object_permission(self, request, view, obj): + if request.method in permissions.SAFE_METHODS: + return True + if request.user.is_superuser: + return True + if is_mediacms_editor(request.user): + return True + + return obj == request.user + + +def user_allowed_to_upload(request): + """Any custom logic for whether a user is allowed + to upload content lives here + """ + if request.user.is_anonymous: + return False + if request.user.is_superuser: + return True + + if settings.CAN_ADD_MEDIA == "all": + return True + elif settings.CAN_ADD_MEDIA == "email_verified": + if request.user.email_is_verified: + return True + elif settings.CAN_ADD_MEDIA == "advancedUser": + if request.user.advancedUser: + return True + return False diff --git a/cms/settings.py b/cms/settings.py new file mode 100644 index 0000000..b5de473 --- /dev/null +++ b/cms/settings.py @@ -0,0 +1,437 @@ +import os +from celery.schedules import crontab + +DEBUG = False + +# PORTAL NAME, this is the portal title and +# is also shown on several places as emails +PORTAL_NAME = "MediaCMS" 
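
An aside on cms/permissions.py above: its docstrings describe when each permission class applies, and the sketch below shows how `IsAuthorizedToAdd` might be attached to a Django REST Framework view. This is illustrative only; the view name and response body are assumptions, not part of this commit, and the real upload handling lives in the files/uploader apps.

```python
# Illustrative sketch only -- not part of this commit.
from rest_framework.views import APIView
from rest_framework.response import Response

from cms.permissions import IsAuthorizedToAdd, user_allowed_to_upload


class MediaUploadView(APIView):  # hypothetical view name
    # SAFE_METHODS pass through; POST is gated by user_allowed_to_upload(),
    # which honours the CAN_ADD_MEDIA setting defined in cms/settings.py.
    permission_classes = (IsAuthorizedToAdd,)

    def post(self, request, format=None):
        # a real view would hand the file over to the uploader/files apps here
        return Response({"can_upload": user_allowed_to_upload(request)})
```
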
+LANGUAGE_CODE = "en-us" +TIME_ZONE = "Europe/London" + +# who can add media +# valid options include 'all', 'email_verified', 'advancedUser' +CAN_ADD_MEDIA = "all" + +# valid choices here are 'public', 'private', 'unlisted +PORTAL_WORKFLOW = "public" + +DEFAULT_THEME = "black" # this is not taken under consideration currently + + +# These are passed on every request +# if set to False will not fetch external content +# this is only for the static files, as fonts/css/js files loaded from CDNs +# not for user uploaded media! +LOAD_FROM_CDN = True +LOGIN_ALLOWED = True # whether the login button appears +REGISTER_ALLOWED = True # whether the register button appears +UPLOAD_MEDIA_ALLOWED = True # whether the upload media button appears +CAN_LIKE_MEDIA = True # whether the like media appears +CAN_DISLIKE_MEDIA = True # whether the dislike media appears +CAN_REPORT_MEDIA = True # whether the report media appears +CAN_SHARE_MEDIA = True # whether the share media appears +# how many times an item need be reported +# to get to private state automatically +REPORTED_TIMES_THRESHOLD = 10 +ALLOW_ANONYMOUS_ACTIONS = ["report", "like", "dislike", "watch"] # need be a list + +# experimental functionality for user ratings - does not work +ALLOW_RATINGS = False +ALLOW_RATINGS_CONFIRMED_EMAIL_ONLY = True + +# ip of the server should be part of this +ALLOWED_HOSTS = ["*", "mediacms.io", "127.0.0.1", "localhost"] +FRONTEND_HOST = "http://localhost" +# FRONTEND_HOST needs an http prefix - at the end of the file +# there's a conversion to https with the SSL_FRONTEND_HOST env +INTERNAL_IPS = "127.0.0.1" + +# settings that are related with UX/appearance +# whether a featured item appears enlarged with player on index page +VIDEO_PLAYER_FEATURED_VIDEO_ON_INDEX_PAGE = False + +PRE_UPLOAD_MEDIA_MESSAGE = "" + +# email settings +DEFAULT_FROM_EMAIL = "info@mediacms.io" +EMAIL_HOST_PASSWORD = "xyz" +EMAIL_HOST_USER = "info@mediacms.io" +EMAIL_USE_TLS = True +SERVER_EMAIL = DEFAULT_FROM_EMAIL +EMAIL_HOST = "mediacms.io" +EMAIL_PORT = 587 +ADMIN_EMAIL_LIST = ["info@mediacms.io"] + + +MEDIA_IS_REVIEWED = True # whether an admin needs to review a media file. +# By default consider this is not needed. +# If set to False, then each new media need be reviewed otherwise +# it won't appear on public listings + +# if set to True the url for original file is returned to the API. +SHOW_ORIGINAL_MEDIA = True +# Keep in mind that nginx will serve the file unless there's +# some authentication taking place. 
Check nginx file and setup a
+# basic http auth user/password if you want to restrict access
+
+MAX_MEDIA_PER_PLAYLIST = 70
+# bytes, maximum size of an uploaded media file
+UPLOAD_MAX_SIZE = 800 * 1024 * 1000 * 5
+
+MAX_CHARS_FOR_COMMENT = 10000  # so that it doesn't end up huge
+
+# valid options: content, author
+RELATED_MEDIA_STRATEGY = "content"
+
+USE_I18N = True
+USE_L10N = True
+USE_TZ = True
+SITE_ID = 1
+
+# protection against anonymous users
+# per ip address limit, for actions such as like/dislike/report
+TIME_TO_ACTION_ANONYMOUS = 10 * 60
+
+# django-allauth settings
+ACCOUNT_SESSION_REMEMBER = True
+ACCOUNT_AUTHENTICATION_METHOD = "username_email"
+ACCOUNT_EMAIL_REQUIRED = True  # new users need to specify email
+ACCOUNT_EMAIL_VERIFICATION = "optional"  # other valid options: 'mandatory', 'none'
+ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
+ACCOUNT_USERNAME_MIN_LENGTH = "4"
+ACCOUNT_ADAPTER = "users.adapter.MyAccountAdapter"
+ACCOUNT_SIGNUP_FORM_CLASS = "users.forms.SignupForm"
+ACCOUNT_USERNAME_VALIDATORS = "users.validators.custom_username_validators"
+ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
+ACCOUNT_USERNAME_REQUIRED = True
+ACCOUNT_LOGIN_ON_PASSWORD_RESET = True
+ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 1
+ACCOUNT_LOGIN_ATTEMPTS_LIMIT = 20
+ACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 5
+# if False, registration won't be open; consider also removing the register links
+USERS_CAN_SELF_REGISTER = True
+
+RESTRICTED_DOMAINS_FOR_USER_REGISTRATION = ["xxx.com", "emaildomainwhatever.com"]
+
+# django rest settings
+REST_FRAMEWORK = {
+    "DEFAULT_AUTHENTICATION_CLASSES": (
+        "rest_framework.authentication.SessionAuthentication",
+        "rest_framework.authentication.BasicAuthentication",
+        "rest_framework.authentication.TokenAuthentication",
+    ),
+    "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
+    "PAGE_SIZE": 50,
+    "DEFAULT_PARSER_CLASSES": [
+        "rest_framework.parsers.JSONParser",
+    ],
+}
+
+
+SECRET_KEY = "2dii4cog7k=5n37$fz)8dst)kg(s3&10)^qa*gv(kk+nv-z&cu"
+# TODO: this needs to be changed!
+
+TEMP_DIRECTORY = "/tmp"  # Don't use a temp directory inside BASE_DIR!!!
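
A note tying the README's Configure section to the `TODO` above about `SECRET_KEY`: overrides belong in `cms/local_settings.py`, which `cms/settings.py` imports at the very end (see the `try: from .local_settings import *` block further down). The sketch below is an assumption-laden example; only the setting names come from this commit, while the domain, portal name and chosen values are illustrative.

```python
# cms/local_settings.py -- illustrative sketch, not part of this commit.
from django.core.management.utils import get_random_secret_key

# Replace the placeholder SECRET_KEY flagged by the TODO above. Generating a key
# at import time is shown only for brevity; in practice you would paste in a
# fixed value once and keep this file out of version control, otherwise sessions
# are invalidated on every restart.
SECRET_KEY = get_random_secret_key()

FRONTEND_HOST = "https://media.example.com"  # example domain (assumption)
PORTAL_NAME = "Example Media Portal"         # example name (assumption)

# Valid values for these are documented alongside their defaults in cms/settings.py.
PORTAL_WORKFLOW = "unlisted"
CAN_ADD_MEDIA = "email_verified"
REGISTER_ALLOWED = False
```
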
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +STATIC_URL = "/static/" # where js/css files are stored on the filesystem +MEDIA_URL = "/media/" # URL where static files are served from the server +STATIC_ROOT = BASE_DIR + "/static/" +# where uploaded + encoded media are stored +MEDIA_ROOT = BASE_DIR + "/media_files/" + +MEDIA_UPLOAD_DIR = os.path.join(MEDIA_ROOT, "original/") +MEDIA_ENCODING_DIR = os.path.join(MEDIA_ROOT, "encoded/") +THUMBNAIL_UPLOAD_DIR = os.path.join(MEDIA_UPLOAD_DIR, "thumbnails/") +SUBTITLES_UPLOAD_DIR = os.path.join(MEDIA_UPLOAD_DIR, "subtitles/") +HLS_DIR = os.path.join(MEDIA_ROOT, "hls/") + +FFMPEG_COMMAND = "ffmpeg" # this is the path +FFPROBE_COMMAND = "ffprobe" # this is the path +MP4HLS = "mp4hls" + +MASK_IPS_FOR_ACTIONS = True +# how many seconds a process in running state without reporting progress is +# considered as stale...unfortunately v9 seems to not include time +# some times so raising this high +RUNNING_STATE_STALE = 60 * 60 * 2 + +FRIENDLY_TOKEN_LEN = 9 + +# for videos, after that duration get split into chunks +# and encoded independently +CHUNKIZE_VIDEO_DURATION = 60 * 5 +# aparently this has to be smaller than VIDEO_CHUNKIZE_DURATION +VIDEO_CHUNKS_DURATION = 60 * 4 + +# always get these two, even if upscaling +MINIMUM_RESOLUTIONS_TO_ENCODE = [240, 360] + +# default settings for notifications +# not all of them are implemented + +USERS_NOTIFICATIONS = { + "MEDIA_ADDED": True, # in use + "MEDIA_ENCODED": False, # not implemented + "MEDIA_REPORTED": False, # not implemented +} + +ADMINS_NOTIFICATIONS = { + "NEW_USER": True, # in use + "MEDIA_ADDED": True, # in use + "MEDIA_ENCODED": False, # not implemented + "MEDIA_REPORTED": True, # in use +} + + +# this is for fineuploader - media uploads +UPLOAD_DIR = "uploads/" +CHUNKS_DIR = "chunks/" + +# number of files to upload using fineuploader at once +UPLOAD_MAX_FILES_NUMBER = 100 +CONCURRENT_UPLOADS = True +CHUNKS_DONE_PARAM_NAME = "done" +FILE_STORAGE = "django.core.files.storage.DefaultStorage" + +X_FRAME_OPTIONS = "ALLOWALL" +EMAIL_BACKEND = "djcelery_email.backends.CeleryEmailBackend" +CELERY_EMAIL_TASK_CONFIG = { + "queue": "short_tasks", +} + +POST_UPLOAD_AUTHOR_MESSAGE_UNLISTED_NO_COMMENTARY = "" +# a message to be shown on the author of a media file and only +# only in case where unlisted workflow is used and no commentary +# exists + +CANNOT_ADD_MEDIA_MESSAGE = "" + +# mp4hls command, part of Bendo4 +MP4HLS_COMMAND = ( + "/home/mediacms.io/mediacms/Bento4-SDK-1-6-0-632.x86_64-unknown-linux/bin/mp4hls" +) + +# highly experimental, related with remote workers +ADMIN_TOKEN = "c2b8e1838b6128asd333ddc5e24" +# this is used by remote workers to push +# encodings once they are done +# USE_BASIC_HTTP = True +# BASIC_HTTP_USER_PAIR = ('user', 'password') +# specify basic auth user/password pair for use with the +# remote workers, if nginx basic auth is setup +# apache2-utils need be installed +# then run +# htpasswd -c /home/mediacms.io/mediacms/deploy/.htpasswd user +# and set a password +# edit /etc/nginx/sites-enabled/mediacms.io and +# uncomment the two lines related to htpasswd + + +CKEDITOR_CONFIGS = { + "default": { + "toolbar": "Custom", + "width": "100%", + "toolbar_Custom": [ + ["Styles"], + ["Format"], + ["Bold", "Italic", "Underline"], + ["HorizontalRule"], + [ + "NumberedList", + "BulletedList", + "-", + "Outdent", + "Indent", + "-", + "JustifyLeft", + "JustifyCenter", + "JustifyRight", + "JustifyBlock", + ], + ["Link", "Unlink"], + ["Image"], + ["RemoveFormat", 
"Source"], + ], + } +} + + +AUTH_USER_MODEL = "users.User" +LOGIN_REDIRECT_URL = "/" + +AUTHENTICATION_BACKENDS = ( + "django.contrib.auth.backends.ModelBackend", + "allauth.account.auth_backends.AuthenticationBackend", +) + +INSTALLED_APPS = [ + "django.contrib.admin", + "django.contrib.auth", + "allauth", + "allauth.account", + "allauth.socialaccount", + "django.contrib.contenttypes", + "django.contrib.sessions", + "django.contrib.messages", + "django.contrib.staticfiles", + "django.contrib.sites", + "rest_framework", + "rest_framework.authtoken", + "imagekit", + "files.apps.FilesConfig", + "users.apps.UsersConfig", + "actions.apps.ActionsConfig", + "debug_toolbar", + "mptt", + "crispy_forms", + "uploader.apps.UploaderConfig", + "djcelery_email", + "ckeditor", +] + +MIDDLEWARE = [ + "django.middleware.security.SecurityMiddleware", + "django.contrib.sessions.middleware.SessionMiddleware", + "django.middleware.common.CommonMiddleware", + "django.middleware.csrf.CsrfViewMiddleware", + "django.contrib.auth.middleware.AuthenticationMiddleware", + "django.contrib.messages.middleware.MessageMiddleware", + "django.middleware.clickjacking.XFrameOptionsMiddleware", + "debug_toolbar.middleware.DebugToolbarMiddleware", +] + +ROOT_URLCONF = "cms.urls" + +TEMPLATES = [ + { + "BACKEND": "django.template.backends.django.DjangoTemplates", + "DIRS": ["templates"], + "APP_DIRS": True, + "OPTIONS": { + "context_processors": [ + "django.template.context_processors.debug", + "django.template.context_processors.request", + "django.contrib.auth.context_processors.auth", + "django.template.context_processors.media", + "django.contrib.messages.context_processors.messages", + "files.context_processors.stuff", + ], + }, + }, +] + +WSGI_APPLICATION = "cms.wsgi.application" + +AUTH_PASSWORD_VALIDATORS = [ + { + "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", + }, + { + "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", + "OPTIONS": { + "min_length": 5, + }, + }, + { + "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", + }, +] + +FILE_UPLOAD_HANDLERS = [ + "django.core.files.uploadhandler.TemporaryFileUploadHandler", +] + +LOGS_DIR = os.path.join(BASE_DIR, "logs") + +LOGGING = { + "version": 1, + "disable_existing_loggers": False, + "handlers": { + "file": { + "level": "ERROR", + "class": "logging.FileHandler", + "filename": os.path.join(LOGS_DIR, "debug.log"), + }, + }, + "loggers": { + "django": { + "handlers": ["file"], + "level": "ERROR", + "propagate": True, + }, + }, +} + +DATABASES = { + "default": { + "ENGINE": "django.db.backends.postgresql", + "NAME": "mediacms", + "HOST": "127.0.0.1", + "PORT": "5432", + "USER": "mediacms", + "PASSWORD": "mediacms", + } +} + + +REDIS_LOCATION = "redis://127.0.0.1:6379/1" +CACHES = { + "default": { + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": REDIS_LOCATION, + "OPTIONS": { + "CLIENT_CLASS": "django_redis.client.DefaultClient", + }, + } +} + +SESSION_ENGINE = "django.contrib.sessions.backends.cache" +SESSION_CACHE_ALIAS = "default" + +# CELERY STUFF +BROKER_URL = REDIS_LOCATION +CELERY_RESULT_BACKEND = BROKER_URL +CELERY_ACCEPT_CONTENT = ["application/json"] +CELERY_TASK_SERIALIZER = "json" +CELERY_RESULT_SERIALIZER = "json" +CELERY_TIMEZONE = TIME_ZONE +CELERY_SOFT_TIME_LIMIT = 2 * 60 * 60 +CELERY_WORKER_PREFETCH_MULTIPLIER = 1 +CELERYD_PREFETCH_MULTIPLIER = 1 + +CELERY_BEAT_SCHEDULE = { + # clear expired sessions, every sunday 1.01am. 
By default Django has 2week + # expire date + "clear_sessions": { + "task": "clear_sessions", + "schedule": crontab(hour=1, minute=1, day_of_week=6), + }, + "get_list_of_popular_media": { + "task": "get_list_of_popular_media", + "schedule": crontab(minute=1, hour="*/10"), + }, + "update_listings_thumbnails": { + "task": "update_listings_thumbnails", + "schedule": crontab(minute=2, hour="*/30"), + }, +} +# TODO: beat, delete chunks from media root +# chunks_dir after xx days...(also uploads_dir) + +try: + # keep a local_settings.py file for local overrides + from .local_settings import * + + # ALLOWED_HOSTS needs a url/ip + ALLOWED_HOSTS.append(FRONTEND_HOST.replace("http://", "").replace("https://", "")) +except ImportError: + # local_settings not in use + pass + + +if "http" not in FRONTEND_HOST: + # FRONTEND_HOST needs a http:// preffix + FRONTEND_HOST = f"http://{FRONTEND_HOST}" + +SSL_FRONTEND_HOST = FRONTEND_HOST.replace("http", "https") diff --git a/cms/urls.py b/cms/urls.py new file mode 100644 index 0000000..ce6692a --- /dev/null +++ b/cms/urls.py @@ -0,0 +1,13 @@ +from django.contrib import admin +from django.urls import path +from django.conf.urls import url, include +import debug_toolbar + +urlpatterns = [ + url(r"^__debug__/", include(debug_toolbar.urls)), + url(r"^", include("files.urls")), + url(r"^", include("users.urls")), + url(r"^accounts/", include("allauth.urls")), + url(r"^api-auth/", include("rest_framework.urls")), + path("admin/", admin.site.urls), +] diff --git a/cms/wsgi.py b/cms/wsgi.py new file mode 100644 index 0000000..5f714e4 --- /dev/null +++ b/cms/wsgi.py @@ -0,0 +1,7 @@ +import os + +from django.core.wsgi import get_wsgi_application + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cms.settings") + +application = get_wsgi_application() diff --git a/deploy/celery_beat.service b/deploy/celery_beat.service new file mode 100644 index 0000000..63a1333 --- /dev/null +++ b/deploy/celery_beat.service @@ -0,0 +1,24 @@ +[Unit] +Description=MediaCMS celery beat +After=network.target + +[Service] +Type=simple +User=www-data +Group=www-data +Restart=always +RestartSec=10 +Environment=APP_DIR="/home/mediacms.io/mediacms" +Environment=CELERY_BIN="/home/mediacms.io/bin/celery" +Environment=CELERY_APP="cms" +Environment=CELERYD_PID_FILE="/home/mediacms.io/mediacms/pids/beat%n.pid" +Environment=CELERYD_LOG_FILE="/home/mediacms.io/mediacms/logs/beat%N.log" +Environment=CELERYD_LOG_LEVEL="INFO" +Environment=APP_DIR="/home/mediacms.io/mediacms" + +ExecStart=/bin/sh -c '${CELERY_BIN} beat -A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS} --workdir=${APP_DIR}' +ExecStop=/bin/kill -s TERM $MAINPID + +[Install] +WantedBy=multi-user.target + diff --git a/deploy/celery_long.service b/deploy/celery_long.service new file mode 100644 index 0000000..ab8ab8a --- /dev/null +++ b/deploy/celery_long.service @@ -0,0 +1,31 @@ +[Unit] +Description=MediaCMS celery long queue +After=network.target + +[Service] +Type=forking +User=www-data +Group=www-data +Restart=always +RestartSec=10 +Environment=APP_DIR="/home/mediacms.io/mediacms" +Environment=CELERYD_NODES="long1" +Environment=CELERY_QUEUE="long_tasks" +Environment=CELERY_BIN="/home/mediacms.io/bin/celery" +Environment=CELERY_APP="cms" +Environment=CELERYD_MULTI="multi" +Environment=CELERYD_OPTS="-Ofair --prefetch-multiplier=1" +Environment=CELERYD_PID_FILE="/home/mediacms.io/mediacms/pids/%n.pid" 
+Environment=CELERYD_LOG_FILE="/home/mediacms.io/mediacms/logs/%N.log" +Environment=CELERYD_LOG_LEVEL="INFO" +Environment=APP_DIR="/home/mediacms.io/mediacms" + +ExecStart=/bin/sh -c '${CELERY_BIN} multi start ${CELERYD_NODES} -A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS} --workdir=${APP_DIR} -Q ${CELERY_QUEUE}' + +ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait ${CELERYD_NODES} --pidfile=${CELERYD_PID_FILE}' + +ExecReload=/bin/sh -c '${CELERY_BIN} multi restart ${CELERYD_NODES} -A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS} --workdir=${APP_DIR} -Q ${CELERY_QUEUE}' + +[Install] +WantedBy=multi-user.target + diff --git a/deploy/celery_short.service b/deploy/celery_short.service new file mode 100644 index 0000000..0ac108b --- /dev/null +++ b/deploy/celery_short.service @@ -0,0 +1,41 @@ +[Unit] +Description=MediaCMS celery short queue +After=network.target + +[Service] +Type=forking +User=www-data +Group=www-data +Restart=always +RestartSec=10 +Environment=APP_DIR="/home/mediacms.io/mediacms" +Environment=CELERYD_NODES="short1 short2" +Environment=CELERY_QUEUE="short_tasks" +# Absolute or relative path to the 'celery' command: +Environment=CELERY_BIN="/home/mediacms.io/bin/celery" +# App instance to use +# comment out this line if you don't use an app +Environment=CELERY_APP="cms" +# or fully qualified: +#CELERY_APP="proj.tasks:app" +# How to call manage.py +Environment=CELERYD_MULTI="multi" +# Extra command-line arguments to the worker +Environment=CELERYD_OPTS="--soft-time-limit=300 -c10" +# - %n will be replaced with the first part of the nodename. +# - %I will be replaced with the current child process index +# and is important when using the prefork pool to avoid race conditions. 
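+# - with CELERYD_NODES="short1 short2" above, %n in the pid file path below would +# expand per node, e.g. /home/mediacms.io/mediacms/pids/short1.pid and short2.pid +# (illustrative expansion, based on the %n note above)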
+Environment=CELERYD_PID_FILE="/home/mediacms.io/mediacms/pids/%n.pid" +Environment=CELERYD_LOG_FILE="/home/mediacms.io/mediacms/logs/%N.log" +Environment=CELERYD_LOG_LEVEL="INFO" +Environment=APP_DIR="/home/mediacms.io/mediacms" + +ExecStart=/bin/sh -c '${CELERY_BIN} multi start ${CELERYD_NODES} -A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS} --workdir=${APP_DIR} -Q ${CELERY_QUEUE}' + +ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait ${CELERYD_NODES} --pidfile=${CELERYD_PID_FILE}' + +ExecReload=/bin/sh -c '${CELERY_BIN} multi restart ${CELERYD_NODES} -A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS} --workdir=${APP_DIR} -Q ${CELERY_QUEUE}' + +[Install] +WantedBy=multi-user.target + diff --git a/deploy/mediacms.io b/deploy/mediacms.io new file mode 100644 index 0000000..0a0b279 --- /dev/null +++ b/deploy/mediacms.io @@ -0,0 +1,78 @@ +server { + listen 80 ; + server_name localhost; + + gzip on; + access_log /var/log/nginx/mediacms.io.access.log; + + error_log /var/log/nginx/mediacms.io.error.log warn; + + # redirect to https if logged in + if ($http_cookie ~* "sessionid") { + rewrite ^/(.*)$ https://localhost/$1 permanent; + } + + # redirect basic forms to https + location ~ (login|login_form|register|mail_password_form)$ { + rewrite ^/(.*)$ https://localhost/$1 permanent; + } + + location /static { + alias /home/mediacms.io/mediacms/static ; + } + + location /media/original { + alias /home/mediacms.io/mediacms/media_files/original; + } + + location /media { + alias /home/mediacms.io/mediacms/media_files ; + } + + location / { + add_header 'Access-Control-Allow-Origin' '*'; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; + add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'; + add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range'; + + include /etc/nginx/sites-enabled/uwsgi_params; + uwsgi_pass 127.0.0.1:9000; + } +} + +server { + listen 443 ssl; + server_name localhost; + + ssl_certificate_key /etc/letsencrypt/live/localhost/privkey.pem; + ssl_certificate /etc/letsencrypt/live/localhost/fullchain.pem; + + gzip on; + access_log /var/log/nginx/mediacms.io.access.log; + + error_log /var/log/nginx/mediacms.io.error.log warn; + + location /static { + alias /home/mediacms.io/mediacms/static ; + } + + location /media/original { + alias /home/mediacms.io/mediacms/media_files/original; + #auth_basic "auth protected area"; + #auth_basic_user_file /home/mediacms.io/mediacms/deploy/.htpasswd; + } + + location /media { + alias /home/mediacms.io/mediacms/media_files ; + } + + location / { + add_header 'Access-Control-Allow-Origin' '*'; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; + add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'; + add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range'; + + include /etc/nginx/sites-enabled/uwsgi_params; + uwsgi_pass 127.0.0.1:9000; + } +} diff --git a/deploy/mediacms.io_fullchain.pem b/deploy/mediacms.io_fullchain.pem new file mode 100644 index 0000000..551ca6c --- /dev/null +++ b/deploy/mediacms.io_fullchain.pem @@ -0,0 +1,58 @@ +-----BEGIN CERTIFICATE----- +MIIFTjCCBDagAwIBAgISBNOUeDlerH9MkKmHLvZJeMYgMA0GCSqGSIb3DQEBCwUA +MEoxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MSMwIQYDVQQD 
+ExpMZXQncyBFbmNyeXB0IEF1dGhvcml0eSBYMzAeFw0yMDAzMTAxNzUxNDFaFw0y +MDA2MDgxNzUxNDFaMBYxFDASBgNVBAMTC21lZGlhY21zLmlvMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAps5Jn18nW2tq/LYFDgQ1YZGLlpF/B2AAPvvH +3yuD+AcT4skKdZouVL/a5pXrptuYL5lthO9dlcja2tuO2ltYrb7Dp01dAIFaJE8O +DKd+Sv5wr8VWQZykqzMiMBgviml7TBvUHQjvCJg8UwmnN0XSUILCttd6u4qOzS7d +lKMMsKpYzLhElBT0rzhhsWulDiy6aAZbMV95bfR74nIWsBJacy6jx3jvxAuvCtkB +OVdOoVL6BPjDE3SNEk53bAZGIb5A9ri0O5jh/zBFT6tQSjUhAUTkmv9oZP547RnV +fDj+rdvCVk/fE+Jno36mcT183Qd/Ty3fWuqFoM5g/luhnfvWEwIDAQABo4ICYDCC +AlwwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD +AjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBTd5EZBt74zu5XxT1uXQs6oM8qOuDAf +BgNVHSMEGDAWgBSoSmpjBH3duubRObemRWXv86jsoTBvBggrBgEFBQcBAQRjMGEw +LgYIKwYBBQUHMAGGImh0dHA6Ly9vY3NwLmludC14My5sZXRzZW5jcnlwdC5vcmcw +LwYIKwYBBQUHMAKGI2h0dHA6Ly9jZXJ0LmludC14My5sZXRzZW5jcnlwdC5vcmcv +MBYGA1UdEQQPMA2CC21lZGlhY21zLmlvMEwGA1UdIARFMEMwCAYGZ4EMAQIBMDcG +CysGAQQBgt8TAQEBMCgwJgYIKwYBBQUHAgEWGmh0dHA6Ly9jcHMubGV0c2VuY3J5 +cHQub3JnMIIBBAYKKwYBBAHWeQIEAgSB9QSB8gDwAHYAXqdz+d9WwOe1Nkh90Eng +MnqRmgyEoRIShBh1loFxRVgAAAFwxcnL+AAABAMARzBFAiAb3yeBuW3j9MxcRc0T +icUBvEa/rH7Fv2eB0oQlnZ1exQIhAPf+CtTXmzxoeT/BBiivj4AmGDsq4xWhe/U6 +BytYrKLeAHYAB7dcG+V9aP/xsMYdIxXHuuZXfFeUt2ruvGE6GmnTohwAAAFwxcnM +HAAABAMARzBFAiAuP5gKyyaT0LVXxwjYD9zhezvxf4Icx0P9pk75c5ao+AIhAK0+ +fSJv+WTXciMT6gA1sk/tuCHuDFAuexSA/6TcRXcVMA0GCSqGSIb3DQEBCwUAA4IB +AQCPCYBU4Q/ro2MUkjDPKGmeqdxQycS4R9WvKTG/nmoahKNg30bnLaDPUcpyMU2k +sPDemdZ7uTGLZ3ZrlIva8DbrnJmrTPf9BMwaM6j+ZV/QhxvKZVIWkLkZrwiVI57X +Ba+rs5IEB4oWJ0EBaeIrzeKG5zLMkRcIdE4Hlhuwu3zGG56c+wmAPuvpIDlYoO6o +W22xRdxoTIHBvkzwonpVYUaRcaIw+48xnllxh1dHO+X69DT45wlF4tKveOUi+L50 +4GWJ8Vjv7Fot/WNHEM4Mnmw0jHj9TPkIZKnPNRMdHmJ5CF/FJFDiptOeuzbfohG+ +mdvuInb8JDc0XBE99Gf/S4/y +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEkjCCA3qgAwIBAgIQCgFBQgAAAVOFc2oLheynCDANBgkqhkiG9w0BAQsFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTE2MDMxNzE2NDA0NloXDTIxMDMxNzE2NDA0Nlow +SjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUxldCdzIEVuY3J5cHQxIzAhBgNVBAMT +GkxldCdzIEVuY3J5cHQgQXV0aG9yaXR5IFgzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAnNMM8FrlLke3cl03g7NoYzDq1zUmGSXhvb418XCSL7e4S0EF +q6meNQhY7LEqxGiHC6PjdeTm86dicbp5gWAf15Gan/PQeGdxyGkOlZHP/uaZ6WA8 +SMx+yk13EiSdRxta67nsHjcAHJyse6cF6s5K671B5TaYucv9bTyWaN8jKkKQDIZ0 +Z8h/pZq4UmEUEz9l6YKHy9v6Dlb2honzhT+Xhq+w3Brvaw2VFn3EK6BlspkENnWA +a6xK8xuQSXgvopZPKiAlKQTGdMDQMc2PMTiVFrqoM7hD8bEfwzB/onkxEz0tNvjj +/PIzark5McWvxI0NHWQWM6r6hCm21AvA2H3DkwIDAQABo4IBfTCCAXkwEgYDVR0T +AQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwfwYIKwYBBQUHAQEEczBxMDIG +CCsGAQUFBzABhiZodHRwOi8vaXNyZy50cnVzdGlkLm9jc3AuaWRlbnRydXN0LmNv +bTA7BggrBgEFBQcwAoYvaHR0cDovL2FwcHMuaWRlbnRydXN0LmNvbS9yb290cy9k +c3Ryb290Y2F4My5wN2MwHwYDVR0jBBgwFoAUxKexpHsscfrb4UuQdf/EFWCFiRAw +VAYDVR0gBE0wSzAIBgZngQwBAgEwPwYLKwYBBAGC3xMBAQEwMDAuBggrBgEFBQcC +ARYiaHR0cDovL2Nwcy5yb290LXgxLmxldHNlbmNyeXB0Lm9yZzA8BgNVHR8ENTAz +MDGgL6AthitodHRwOi8vY3JsLmlkZW50cnVzdC5jb20vRFNUUk9PVENBWDNDUkwu +Y3JsMB0GA1UdDgQWBBSoSmpjBH3duubRObemRWXv86jsoTANBgkqhkiG9w0BAQsF +AAOCAQEA3TPXEfNjWDjdGBX7CVW+dla5cEilaUcne8IkCJLxWh9KEik3JHRRHGJo +uM2VcGfl96S8TihRzZvoroed6ti6WqEBmtzw3Wodatg+VyOeph4EYpr/1wXKtx8/ +wApIvJSwtmVi4MFU5aMqrSDE6ea73Mj2tcMyo5jMd6jmeWUHK8so/joWUoHOUgwu +X4Po1QYz+3dszkDqMp4fklxBwXRsW10KXzPMTZ+sOPAveyxindmjkW8lGy+QsRlG +PfZ+G6Z6h7mjem0Y+iWlkYcV4PIWL1iwBi8saCbGS5jN2p8M+X+Q7UNKEkROb3N6 +KOqkqm57TH2H3eDJAkSnh6/DNFu0Qg== +-----END CERTIFICATE----- diff --git a/deploy/mediacms.io_privkey.pem b/deploy/mediacms.io_privkey.pem new file mode 100644 index 
0000000..d366f09 --- /dev/null +++ b/deploy/mediacms.io_privkey.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCmzkmfXydba2r8 +tgUOBDVhkYuWkX8HYAA++8ffK4P4BxPiyQp1mi5Uv9rmleum25gvmW2E712VyNra +247aW1itvsOnTV0AgVokTw4Mp35K/nCvxVZBnKSrMyIwGC+KaXtMG9QdCO8ImDxT +Cac3RdJQgsK213q7io7NLt2UowywqljMuESUFPSvOGGxa6UOLLpoBlsxX3lt9Hvi +chawElpzLqPHeO/EC68K2QE5V06hUvoE+MMTdI0STndsBkYhvkD2uLQ7mOH/MEVP +q1BKNSEBROSa/2hk/njtGdV8OP6t28JWT98T4mejfqZxPXzdB39PLd9a6oWgzmD+ +W6Gd+9YTAgMBAAECggEADnEJuryYQbf5GUwBAAepP3tEZJLQNqk/HDTcRxwTXuPt ++tKBD1F79WZu40vTjSyx7l0QOFQo/BDZsd0Ubx89fD1p3xA5nxOT5FTb2IifzIpe +4zjokOGo+BGDQjq10vvy6tH1+VWOrGXRwzawvX5UCRhpFz9sptQGLQmDsZy0Oo9B +LtavYVUqsbyqRWlzaclHgbythegIACWkqcalOzOtx+l6TGBRjej+c7URcwYBfr7t +XTAzbP+vnpaJovZyZT1eekr0OLzMpnjx4HvRvzL+NxauRpn6KfabsTfZlk8nrs4I +UdSjeukj1Iz8rGQilHdN/4dVJ3KzrlHVkVTBSjmMUQKBgQDaVXZnhAScfdiKeZbO +rdUAWcnwfkDghtRuAmzHaRM/FhFBEoVhdSbBuu+OUyBnIw/Ra4o2ePuEBcKIUiQO +w2tnE1CY5PPAcjw+OCSpvzy5xxjaqaRbm9BJp3FTeEYGLXERnchPpHg/NpexuF22 +QOJ+FrysPyNMxuQp47ZwO9WT3QKBgQDDlSGjq/eeWxemwf7ZqMVlRyqsdJsgnCew +DkC62IGiYCBDfeEmndN+vcA/uzJHYV4iXiqS3aYJCWGaZFMhdIhIn5MgULvO1j5G +u/MxuzaaNPz22FlNCWTLBw4T1HOOvyTL+nLtZDKJ/BHxgHCmur1kiGvvZWrcCthD +afLEmseqrwKBgBuLZKCymxJTHhp6NHhmndSpfzyD8RNibzJhw+90ZiUzV4HqIEGn +Ufhm6Qn/mrroRXqaIpm0saZ6Q4yHMF1cchRS73wahlXlE4yV8KopojOd1pjfhgi4 +o5JnOXjaV5s36GfcjATgLvtqm8CkDc6MaQaXP75LSNzKysYuIDoQkmVRAoGAAghF +rja2Pv4BU+lGJarcSj4gEmSvy/nza5/qSka/qhlHnIvtUAJp1TJRkhf24MkBOmgy +Fw6YkBV53ynVt05HsEGAPOC54t9VDFUdpNGmMpoEWuhKnUNQuc9b9RbLEJup3TjA +Avl8kPR+lzzXbtQX7biBLp6mKp0uPB0YubRGCN8CgYA0JMxK0x38Q2x3AQVhOmZh +YubtIa0JqVJhvpweOCFnkq3ebBpLsWYwiLTn86vuD0jupe5M3sxtefjkJmAKd8xY +aBU7QWhjh1fX4mzmggnbjcrIFbkIHsxwMeg567U/4AGxOOUsv9QUn37mqycqRKEn +YfUyYNLM6F3MmQAOs2kaHw== +-----END PRIVATE KEY----- diff --git a/deploy/mediacms.service b/deploy/mediacms.service new file mode 100644 index 0000000..4d81e53 --- /dev/null +++ b/deploy/mediacms.service @@ -0,0 +1,13 @@ +[Unit] +Description=MediaCMS uwsgi + +[Service] +ExecStart=/home/mediacms.io/bin/uwsgi --ini /home/mediacms.io/mediacms/uwsgi.ini +ExecStop=/usr/bin/killall -9 uwsgi +RestartSec=3 +#ExecRestart=killall -9 uwsgi; sleep 5; /home/sss/bin/uwsgi --ini /home/sss/wordgames/uwsgi.ini +Restart=always + + +[Install] +WantedBy=multi-user.target diff --git a/deploy/mediacms_logrorate b/deploy/mediacms_logrorate new file mode 100644 index 0000000..c4fc3c6 --- /dev/null +++ b/deploy/mediacms_logrorate @@ -0,0 +1,7 @@ +/home/mediacms.io/mediacms/logs/*.log { + weekly + missingok + rotate 7 + compress + notifempty +} diff --git a/deploy/nginx.conf b/deploy/nginx.conf new file mode 100644 index 0000000..1dda610 --- /dev/null +++ b/deploy/nginx.conf @@ -0,0 +1,41 @@ +user www-data; +worker_processes auto; +pid /run/nginx.pid; + +events { + worker_connections 10240; +} + + worker_rlimit_nofile 20000; #each connection needs a filehandle (or 2 if you are proxying) +http { + proxy_connect_timeout 75; + proxy_read_timeout 12000; + client_max_body_size 5800M; + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 10; + types_hash_max_size 2048; + + include /etc/nginx/mime.types; + default_type application/octet-stream; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE + ssl_prefer_server_ciphers on; + + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + + gzip on; + gzip_disable "msie6"; + + log_format compression '$remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + 
'"$http_referer" "$http_user_agent" "$gzip_ratio"'; + + gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript; + + include /etc/nginx/conf.d/*.conf; + include /etc/nginx/sites-enabled/*; +} + diff --git a/deploy/uwsgi.ini b/deploy/uwsgi.ini new file mode 100644 index 0000000..b38fe84 --- /dev/null +++ b/deploy/uwsgi.ini @@ -0,0 +1,19 @@ +[uwsgi] + +chdir = /home/mediacms.io/mediacms/ +virtualenv = /home/mediacms.io +module = cms.wsgi + +uid = www-data +gid = www-data + +processes = 10 +threads = 10 +master = true +workers = 8 +vacuum = true + +socket = 127.0.0.1:9000 + +logto = /home/mediacms.io/mediacms/logs/errorlog.txt + diff --git a/deploy/uwsgi_params b/deploy/uwsgi_params new file mode 100644 index 0000000..5abf809 --- /dev/null +++ b/deploy/uwsgi_params @@ -0,0 +1,16 @@ +uwsgi_param QUERY_STRING $query_string; +uwsgi_param REQUEST_METHOD $request_method; +uwsgi_param CONTENT_TYPE $content_type; +uwsgi_param CONTENT_LENGTH $content_length; + +uwsgi_param REQUEST_URI $request_uri; +uwsgi_param PATH_INFO $document_uri; +uwsgi_param DOCUMENT_ROOT $document_root; +uwsgi_param SERVER_PROTOCOL $server_protocol; +uwsgi_param REQUEST_SCHEME $scheme; +uwsgi_param HTTPS $https if_not_empty; + +uwsgi_param REMOTE_ADDR $remote_addr; +uwsgi_param REMOTE_PORT $remote_port; +uwsgi_param SERVER_PORT $server_port; +uwsgi_param SERVER_NAME $server_name; diff --git a/docs/Configuration.md b/docs/Configuration.md new file mode 100644 index 0000000..78dab51 --- /dev/null +++ b/docs/Configuration.md @@ -0,0 +1,244 @@ +## Configuration + +A number of options are available on `cms/settings.py`. + +It is advisable to override any of them by adding it to `cms/local_settings.py` . + +Any change needs restart of MediaCMS in order to take effect. So edit `cms/local_settings.py`, make a change and restart MediaCMS + +``` +#systectl restart mediacms +``` + + +### change portal logo + +Set a new svg file for the white theme (`static/images/logo_dark.svg`) or the dark theme (`static/images/logo_light.svg`) + +### set global portal title + +set `PORTAL_NAME`, eg + +``` +PORTAL_NAME = 'my awesome portal' +``` + +### who can add media + +By default `CAN_ADD_MEDIA = "all"` means that all registered users can add media. Other valid options are: + +- **email_verified**, a user not only has to register an account but also verify the email (by clicking the link sent upon registration). Apparently email configuration need to work, otherise users won't receive emails. + +- **advancedUser**, only users that are marked as advanced users can add media. Admins or MediaCMS managers can make users advanced users by editing their profile and selecting advancedUser. + +### what is the portal workflow + +The `PORTAL_WORKFLOW` variable specifies what happens to newly uploaded media, whether they appear on listings (as the index page, or search) + +- **public** is the default option and means that a media can appear on listings. If media type is video, it will appear once at least a task that produces an encoded version of the file has finished succesfully. For other type of files, as image/audio they appear instantly + +- **private** means that newly uploaded content is private - only users can see it or MediaCMS editors, managers and admins. Those can also set the status to public or unlisted + +- **unlisted** means that items are unlisted. 
However if a user visits the url of an unlisted media, it will be shown (as opposed to private) + + +### show/hide the Sign in button + +to show button: +``` +LOGIN_ALLOWED = True +``` + +to hide button: + +``` +LOGIN_ALLOWED = False +``` + +### show/hide the Register button + +to show button: +``` +REGISTER_ALLOWED = True +``` + +to hide button: + +``` +REGISTER_ALLOWED = False +``` + + +### show/hide the upload media button + +To show: + +``` +UPLOAD_MEDIA_ALLOWED = True +``` + +To hide: + +``` +UPLOAD_MEDIA_ALLOWED = False +``` + +### show/hide the actions buttons (like/dislike/report) + +Make changes (True/False) to any of the following: + +``` +- CAN_LIKE_MEDIA = True # whether the like media appears +- CAN_DISLIKE_MEDIA = True # whether the dislike media appears +- CAN_REPORT_MEDIA = True # whether the report media appears +- CAN_SHARE_MEDIA = True # whether the share media appears +``` + +### automatically hide media upon being reported + +set a low number for variable `REPORTED_TIMES_THRESHOLD` +eg + +``` +REPORTED_TIMES_THRESHOLD = 2 +``` + +once the limit is reached, media goes to private state and an email is sent to admins + +### set a custom message on the media upload page + +this message will appear below the media drag and drop form + +``` +PRE_UPLOAD_MEDIA_MESSAGE = 'custom message' +``` + +### set email settings + +Set correct settings per provider + +``` +DEFAULT_FROM_EMAIL = 'info@mediacms.io' +EMAIL_HOST_PASSWORD = 'xyz' +EMAIL_HOST_USER = 'info@mediacms.io' +EMAIL_USE_TLS = True +SERVER_EMAIL = DEFAULT_FROM_EMAIL +EMAIL_HOST = 'mediacms.io' +EMAIL_PORT = 587 +ADMIN_EMAIL_LIST = ['info@mediacms.io'] +``` + +### disallow user registrations from specific domains + +set domains that are not valid for registration via this variable: + +``` +RESTRICTED_DOMAINS_FOR_USER_REGISTRATION = [ + 'xxx.com', 'emaildomainwhatever.com'] +``` + +### require a review by MediaCMS editors/managers/admins + +set value + +``` +MEDIA_IS_REVIEWED = False +``` + +any uploaded media now needs to be reviewed before it can appear to the listings. +MediaCMS editors/managers/admins can visit the media page and edit it, where they can see the option to mark media as reviewed. By default this is set to True, so all media don't require to be reviewed + +### specify maximum number of media for a playlist + +set a different threshold on variable `MAX_MEDIA_PER_PLAYLIST` + +eg + +``` +MAX_MEDIA_PER_PLAYLIST = 14 +``` + +### specify maximum size of a media that can be uploaded + +change `UPLOAD_MAX_SIZE`. + +default is 4GB + +``` +UPLOAD_MAX_SIZE = 800 * 1024 * 1000 * 5 +``` + +### specify maximum size of comments + +change `MAX_CHARS_FOR_COMMENT` + +default: + +``` +MAX_CHARS_FOR_COMMENT = 10000 +``` + +### how many files to upload in parallel + +set a different threshold for `UPLOAD_MAX_FILES_NUMBER` +default: + +``` +UPLOAD_MAX_FILES_NUMBER = 100 +``` + +### force users confirm their email upon registrations + +default option for email confirmation is optional. 
Set this to 'mandatory' in order to force users to confirm their email before they can log in + +``` +ACCOUNT_EMAIL_VERIFICATION = 'mandatory' +``` + +### rate limit account login attempts + +after this number of failed login attempts is reached + +``` +ACCOUNT_LOGIN_ATTEMPTS_LIMIT = 20 +``` + +further attempts are blocked for this timeout (in seconds) + +``` +ACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 5 +``` + +### disallow user registration + +set the following variable to False + +``` +USERS_CAN_SELF_REGISTER = False +``` + +### configure notifications + +Global notifications that are implemented are controlled by the following options: + +``` +USERS_NOTIFICATIONS = { + 'MEDIA_ADDED': True, +} +``` + +If you want to disable notifications for new media, set MEDIA_ADDED to False + +Admins also receive notifications for different events; set any of the following to False to disable them + +``` +ADMINS_NOTIFICATIONS = { + 'NEW_USER': True, + 'MEDIA_ADDED': True, + 'MEDIA_REPORTED': True, +} +``` + +- NEW_USER: a new user is added +- MEDIA_ADDED: a media is added +- MEDIA_REPORTED: a media was reported diff --git a/docs/User_Scenarios.md b/docs/User_Scenarios.md new file mode 100644 index 0000000..206314d --- /dev/null +++ b/docs/User_Scenarios.md @@ -0,0 +1,20 @@ +## User scenarios to test + + +## test video media + image + try uploading a video + image, make sure they get encoded well and check they appear on the index/search/category/author pages + try editing/setting metadata, confirm the action is performed and that the media are searchable + try adding a custom poster, confirm it loads well on the video page/listings + try specifying a different thumbnail time, confirm an automatic screenshot is taken + + +## portal workflow + change the workflow to unlisted, check that new media don't appear on the index/search/category/author pages + +## users management + create an admin, a MediaCMS editor and a MediaCMS manager. All should see edit/delete on media and comments, and these actions should work. + For editing and deleting users, only MediaCMS managers and admins should see edit/delete, and these actions should work.
+ +## test subtitle + add language and test subtitling + diff --git a/docs/images/embed.jpg b/docs/images/embed.jpg new file mode 100644 index 0000000..5ffbd44 Binary files /dev/null and b/docs/images/embed.jpg differ diff --git a/docs/images/index.jpg b/docs/images/index.jpg new file mode 100644 index 0000000..668d918 Binary files /dev/null and b/docs/images/index.jpg differ diff --git a/docs/images/video.jpg b/docs/images/video.jpg new file mode 100644 index 0000000..fb71b39 Binary files /dev/null and b/docs/images/video.jpg differ diff --git a/files/__init__.py b/files/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/files/admin.py b/files/admin.py new file mode 100644 index 0000000..65ce937 --- /dev/null +++ b/files/admin.py @@ -0,0 +1,87 @@ +from django.contrib import admin + +from .models import ( + Media, + Encoding, + EncodeProfile, + Category, + Comment, + Tag, + Language, + Subtitle, +) + + +class CommentAdmin(admin.ModelAdmin): + search_fields = ["text"] + list_display = ["text", "add_date", "user", "media"] + ordering = ("-add_date",) + readonly_fields = ("user", "media", "parent") + + +class MediaAdmin(admin.ModelAdmin): + search_fields = ["title"] + list_display = [ + "title", + "user", + "add_date", + "media_type", + "duration", + "state", + "is_reviewed", + "encoding_status", + "featured", + "get_comments_count", + ] + list_filter = ["state", "is_reviewed", "encoding_status", "featured", "category"] + ordering = ("-add_date",) + readonly_fields = ("user", "tags", "category", "channel") + + def get_comments_count(self, obj): + return obj.comments.count() + + get_comments_count.short_description = "Comments count" + + +class CategoryAdmin(admin.ModelAdmin): + search_fields = ["title"] + list_display = ["title", "user", "add_date", "is_global", "media_count"] + list_filter = ["is_global"] + ordering = ("-add_date",) + readonly_fields = ("user", "media_count") + + +class TagAdmin(admin.ModelAdmin): + search_fields = ["title"] + list_display = ["title", "user", "media_count"] + readonly_fields = ("user", "media_count") + + +class EncodeProfileAdmin(admin.ModelAdmin): + list_display = ("name", "extension", "resolution", "codec", "description", "active") + list_filter = ["extension", "resolution", "codec", "active"] + search_fields = ["name", "extension", "resolution", "codec", "description"] + list_per_page = 100 + fields = ("name", "extension", "resolution", "codec", "description", "active") + + +class LanguageAdmin(admin.ModelAdmin): + pass + + +class SubtitleAdmin(admin.ModelAdmin): + pass + + +class EncodingAdmin(admin.ModelAdmin): + pass + + +admin.site.register(EncodeProfile, EncodeProfileAdmin) +admin.site.register(Comment, CommentAdmin) +admin.site.register(Media, MediaAdmin) +admin.site.register(Encoding, EncodingAdmin) +admin.site.register(Category, CategoryAdmin) +admin.site.register(Tag, TagAdmin) +admin.site.register(Subtitle, SubtitleAdmin) +admin.site.register(Language, LanguageAdmin) diff --git a/files/apps.py b/files/apps.py new file mode 100644 index 0000000..b742dfd --- /dev/null +++ b/files/apps.py @@ -0,0 +1,5 @@ +from django.apps import AppConfig + + +class FilesConfig(AppConfig): + name = "files" diff --git a/files/backends.py b/files/backends.py new file mode 100644 index 0000000..87cdadc --- /dev/null +++ b/files/backends.py @@ -0,0 +1,77 @@ +# ffmpeg only backend + +from subprocess import PIPE, Popen +import locale +import re +import logging + +logger = logging.getLogger(__name__) + + +class VideoEncodingError(Exception): + def 
__init__(self, *args, **kwargs): + self.message = args[0] + super(VideoEncodingError, self).__init__(*args, **kwargs) + + +RE_TIMECODE = re.compile(r"time=(\d+:\d+:\d+.\d+)") +console_encoding = locale.getdefaultlocale()[1] or "UTF-8" + + +class FFmpegBackend(object): + name = "FFmpeg" + + def __init__(self): + pass + + def _spawn(self, cmd): + try: + return Popen( + cmd, + shell=False, + stdin=PIPE, + stdout=PIPE, + stderr=PIPE, + close_fds=True, + ) + except OSError as e: + raise VideoEncodingError("Error while running ffmpeg", e) + + def _check_returncode(self, process): + ret = {} + stdout, stderr = process.communicate() + ret["code"] = process.returncode + return ret + + def encode(self, cmd): + process = self._spawn(cmd) + buf = output = "" + while True: + out = process.stderr.read(10) + + if not out: + break + try: + out = out.decode(console_encoding) + except UnicodeDecodeError: + out = "" + output = output[-500:] + out + buf = buf[-500:] + out + try: + line, buf = buf.split("\r", 1) + except BaseException: + continue + + progress = RE_TIMECODE.findall(line) + if progress: + progress = progress[0] + yield progress + + process_check = self._check_returncode(process) + if process_check["code"] != 0: + raise VideoEncodingError(output[-1000:]) # output could be huge + + if not output: + raise VideoEncodingError("No output from FFmpeg.") + + yield output[-1000:] # output could be huge diff --git a/files/context_processors.py b/files/context_processors.py new file mode 100644 index 0000000..0fd883f --- /dev/null +++ b/files/context_processors.py @@ -0,0 +1,40 @@ +from django.conf import settings +from .methods import is_mediacms_editor, is_mediacms_manager + + +def stuff(request): + """Pass settings to the frontend""" + ret = {} + if request.is_secure(): + # in case session is https, pass this setting so + # that the frontend uses https too + ret["FRONTEND_HOST"] = settings.SSL_FRONTEND_HOST + else: + ret["FRONTEND_HOST"] = settings.FRONTEND_HOST + ret["DEFAULT_THEME"] = settings.DEFAULT_THEME + ret["PORTAL_NAME"] = settings.PORTAL_NAME + ret["LOAD_FROM_CDN"] = settings.LOAD_FROM_CDN + ret["CAN_LOGIN"] = settings.LOGIN_ALLOWED + ret["CAN_REGISTER"] = settings.REGISTER_ALLOWED + ret["CAN_UPLOAD_MEDIA"] = settings.UPLOAD_MEDIA_ALLOWED + ret["CAN_LIKE_MEDIA"] = settings.CAN_LIKE_MEDIA + ret["CAN_DISLIKE_MEDIA"] = settings.CAN_DISLIKE_MEDIA + ret["CAN_REPORT_MEDIA"] = settings.CAN_REPORT_MEDIA + ret["CAN_SHARE_MEDIA"] = settings.CAN_SHARE_MEDIA + ret["UPLOAD_MAX_SIZE"] = settings.UPLOAD_MAX_SIZE + ret["UPLOAD_MAX_FILES_NUMBER"] = settings.UPLOAD_MAX_FILES_NUMBER + ret["PRE_UPLOAD_MEDIA_MESSAGE"] = settings.PRE_UPLOAD_MEDIA_MESSAGE + ret[ + "POST_UPLOAD_AUTHOR_MESSAGE_UNLISTED_NO_COMMENTARY" + ] = settings.POST_UPLOAD_AUTHOR_MESSAGE_UNLISTED_NO_COMMENTARY + ret["IS_MEDIACMS_ADMIN"] = request.user.is_superuser + ret["IS_MEDIACMS_EDITOR"] = is_mediacms_editor(request.user) + ret["IS_MEDIACMS_MANAGER"] = is_mediacms_manager(request.user) + ret["ALLOW_RATINGS"] = settings.ALLOW_RATINGS + ret[ + "ALLOW_RATINGS_CONFIRMED_EMAIL_ONLY" + ] = settings.ALLOW_RATINGS_CONFIRMED_EMAIL_ONLY + ret[ + "VIDEO_PLAYER_FEATURED_VIDEO_ON_INDEX_PAGE" + ] = settings.VIDEO_PLAYER_FEATURED_VIDEO_ON_INDEX_PAGE + return ret diff --git a/files/exceptions.py b/files/exceptions.py new file mode 100644 index 0000000..315f589 --- /dev/null +++ b/files/exceptions.py @@ -0,0 +1,4 @@ +class VideoEncodingError(Exception): + def __init__(self, *args, **kwargs): + self.message = args[0] + super(VideoEncodingError, 
self).__init__(*args, **kwargs) diff --git a/files/feeds.py b/files/feeds.py new file mode 100644 index 0000000..46aedb5 --- /dev/null +++ b/files/feeds.py @@ -0,0 +1,26 @@ +from django.contrib.syndication.views import Feed +from django.urls import reverse +from django.db.models import Q + +from .models import Media + + +class RssMediaFeed(Feed): + title = "Latest Media" + link = "/media" + description = "Latest Media RSS feed" + + def items(self): + basic_query = Q(listable=True) + media = Media.objects.filter(basic_query).order_by("-add_date") + media = media.prefetch_related("user") + return media[:40] + + def item_title(self, item): + return item.title + + def item_description(self, item): + return item.description + + def item_link(self, item): + return reverse("get_media") + "?m={0}".format(item.friendly_token) diff --git a/files/forms.py b/files/forms.py new file mode 100644 index 0000000..79c207f --- /dev/null +++ b/files/forms.py @@ -0,0 +1,95 @@ +from django import forms +from .models import Media, Subtitle +from .methods import is_mediacms_editor, get_next_state + + +class MultipleSelect(forms.CheckboxSelectMultiple): + input_type = "checkbox" + + +class MediaForm(forms.ModelForm): + new_tags = forms.CharField( + label="Tags", help_text="a comma separated list of new tags.", required=False + ) + + class Meta: + model = Media + fields = ( + "title", + "category", + "new_tags", + "add_date", + "uploaded_poster", + "description", + "state", + "enable_comments", + "featured", + "thumbnail_time", + "reported_times", + "is_reviewed", + ) + widgets = { + "tags": MultipleSelect(), + } + + def __init__(self, user, *args, **kwargs): + self.user = user + super(MediaForm, self).__init__(*args, **kwargs) + if self.instance.media_type != "video": + self.fields.pop("thumbnail_time") + if not is_mediacms_editor(user): + self.fields.pop("featured") + self.fields.pop("reported_times") + self.fields.pop("is_reviewed") + self.fields["new_tags"].initial = ", ".join( + [tag.title for tag in self.instance.tags.all()] + ) + + def clean_uploaded_poster(self): + image = self.cleaned_data.get("uploaded_poster", False) + if image: + if image.size > 5 * 1024 * 1024: + raise forms.ValidationError("Image file too large ( > 5mb )") + return image + + def save(self, *args, **kwargs): + data = self.cleaned_data + state = data.get("state") + if state != self.initial["state"]: + self.instance.state = get_next_state( + self.user, self.initial["state"], self.instance.state + ) + + media = super(MediaForm, self).save(*args, **kwargs) + return media + + +class SubtitleForm(forms.ModelForm): + class Meta: + model = Subtitle + fields = ["language", "subtitle_file"] + + def __init__(self, media_item, *args, **kwargs): + super(SubtitleForm, self).__init__(*args, **kwargs) + self.instance.media = media_item + + def save(self, *args, **kwargs): + self.instance.user = self.instance.media.user + media = super(SubtitleForm, self).save(*args, **kwargs) + return media + + +class ContactForm(forms.Form): + from_email = forms.EmailField(required=True) + name = forms.CharField(required=False) + message = forms.CharField(widget=forms.Textarea, required=True) + + def __init__(self, user, *args, **kwargs): + super(ContactForm, self).__init__(*args, **kwargs) + self.fields["name"].label = "Your name:" + self.fields["from_email"].label = "Your email:" + self.fields["message"].label = "Please add your message here and submit:" + self.user = user + if user.is_authenticated: + self.fields.pop("name") + self.fields.pop("from_email") diff 
--git a/files/helpers.py b/files/helpers.py new file mode 100644 index 0000000..e4c056c --- /dev/null +++ b/files/helpers.py @@ -0,0 +1,754 @@ +# Kudos to Werner Robitza, AVEQ GmbH, for helping with ffmpeg +# related content + +import os +import math +import shutil +import tempfile +import random +import hashlib +import subprocess +import json +from fractions import Fraction +import filetype +from django.conf import settings + + +CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + +CRF_ENCODING_NUM_SECONDS = 2 # 0 * 60 # videos with greater duration will get +# CRF encoding and not two-pass +# Encoding individual chunks may yield quality variations if you use a +# too low bitrate, so if you go for the chunk-based variant +# you should use CRF encoding. + +MAX_RATE_MULTIPLIER = 1.5 +BUF_SIZE_MULTIPLIER = 1.5 + +# in seconds, anything between 2 and 6 makes sense +KEYFRAME_DISTANCE = 4 +KEYFRAME_DISTANCE_MIN = 2 + +# speed presets +# see https://trac.ffmpeg.org/wiki/Encode/H.264 +X26x_PRESET = "medium" # "medium" +X265_PRESET = "medium" +X26x_PRESET_BIG_HEIGHT = "faster" + +# VP9_SPEED = 1 # between 0 and 4, lower is slower +VP9_SPEED = 2 + + +VIDEO_CRFS = { + "h264_baseline": 23, + "h264": 23, + "h265": 28, + "vp9": 32, +} + +# video rates for 25 or 60 fps input, for different codecs, in kbps +VIDEO_BITRATES = { + "h264": { + 25: { + 240: 300, + 360: 500, + 480: 1000, + 720: 2500, + 1080: 4500, + 1440: 9000, + 2160: 18000, + }, + 60: {720: 3500, 1080: 7500, 1440: 18000, 2160: 40000}, + }, + "h265": { + 25: { + 240: 150, + 360: 275, + 480: 500, + 720: 1024, + 1080: 1800, + 1440: 4500, + 2160: 10000, + }, + 60: {720: 1800, 1080: 3000, 1440: 8000, 2160: 18000}, + }, + "vp9": { + 25: { + 240: 150, + 360: 275, + 480: 500, + 720: 1024, + 1080: 1800, + 1440: 4500, + 2160: 10000, + }, + 60: {720: 1800, 1080: 3000, 1440: 8000, 2160: 18000}, + }, +} + + +AUDIO_ENCODERS = {"h264": "aac", "h265": "aac", "vp9": "libopus"} + +AUDIO_BITRATES = {"h264": 128, "h265": 128, "vp9": 96} + +EXTENSIONS = {"h264": "mp4", "h265": "mp4", "vp9": "webm"} + +VIDEO_PROFILES = {"h264": "main", "h265": "main"} + + +def get_portal_workflow(): + return settings.PORTAL_WORKFLOW + + +def get_default_state(user=None): + # possible states given the portal workflow setting + state = "private" + if settings.PORTAL_WORKFLOW == "public": + state = "public" + if settings.PORTAL_WORKFLOW == "unlisted": + state = "unlisted" + if settings.PORTAL_WORKFLOW == "private_verified": + if user and user.advancedUser: + state = "unlisted" + return state + + +def get_file_name(filename): + return filename.split("/")[-1] + + +def get_file_type(filename): + if not os.path.exists(filename): + return None + file_type = None + kind = filetype.guess(filename) + if kind is not None: + if kind.mime.startswith("video"): + file_type = "video" + elif kind.mime.startswith("image"): + file_type = "image" + elif kind.mime.startswith("audio"): + file_type = "audio" + elif "pdf" in kind.mime: + file_type = "pdf" + else: + # TODO: do something for files not supported by filetype lib + pass + return file_type + + +def rm_file(filename): + if os.path.isfile(filename): + try: + os.remove(filename) + return True + except OSError: + pass + return False + + +def rm_files(filenames): + if isinstance(filenames, list): + for filename in filenames: + rm_file(filename) + return True + + +def rm_dir(directory): + if os.path.isdir(directory): + # refuse to delete a dir inside project BASE_DIR + if directory.startswith(settings.BASE_DIR): + try: + 
shutil.rmtree(directory) + return True + except (FileNotFoundError, PermissionError): + pass + return False + + +def url_from_path(filename): + # TODO: find a way to preserver http - https ... + return "{0}{1}".format( + settings.MEDIA_URL, filename.replace(settings.MEDIA_ROOT, "") + ) + + +def create_temp_file(suffix=None, dir=settings.TEMP_DIRECTORY): + tf = tempfile.NamedTemporaryFile(delete=False, suffix=suffix, dir=dir) + return tf.name + + +def create_temp_dir(suffix=None, dir=settings.TEMP_DIRECTORY): + td = tempfile.mkdtemp(dir=dir) + return td + + +def produce_friendly_token(token_len=settings.FRIENDLY_TOKEN_LEN): + token = "" + while len(token) != token_len: + token += CHARS[random.randint(0, len(CHARS) - 1)] + return token + + +def clean_friendly_token(token): + # cleans token + for char in token: + if char not in CHARS: + token.replace(char, "") + return token + + +def mask_ip(ip_address): + return hashlib.md5(ip_address.encode("utf-8")).hexdigest() + + +def run_command(cmd, cwd=None): + """ + Run a command directly + """ + if isinstance(cmd, str): + cmd = cmd.split() + ret = {} + if cwd: + process = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd + ) + else: + process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + # TODO: catch unicodedecodeerrors here... + if process.returncode == 0: + try: + ret["out"] = stdout.decode("utf-8") + except BaseException: + ret["out"] = "" + try: + ret["error"] = stderr.decode("utf-8") + except BaseException: + ret["error"] = "" + else: + try: + ret["error"] = stderr.decode("utf-8") + except BaseException: + ret["error"] = "" + return ret + + +def media_file_info(input_file): + """ + Get the info about an input file, as determined by ffprobe + + Returns a dict, with the keys: + - `filename`: Filename + - `file_size`: Size of the file in bytes + - `video_duration`: Duration of the video in `s.msec` + - `video_frame_rate`: Framerate in Hz + - `video_bitrate`: Bitrate of the video stream in kBit/s + - `video_width`: Width in pixels + - `video_height`: Height in pixels + - `video_codec`: Video codec + - `audio_duration`: Duration of the audio in `s.msec` + - `audio_sample_rate`: Audio sample rate in Hz + - `audio_codec`: Audio codec name (`aac`) + - `audio_bitrate`: Bitrate of the video stream in kBit/s + + Also returns the video and audio info raw from ffprobe. 
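+ + A minimal illustrative example (assumes ffprobe is installed and the file exists): + + info = media_file_info("/tmp/clip.mp4") + info.get("video_codec") # e.g. "h264" + info.get("video_height") # e.g. 720 + info.get("fail") # True is returned under this key when probing fails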
+ """ + ret = {} + + if not os.path.isfile(input_file): + ret["fail"] = True + return ret + + video_info = {} + audio_info = {} + cmd = ["stat", "-c", "%s", input_file] + + stdout = run_command(cmd).get("out") + if stdout: + file_size = int(stdout.strip()) + else: + ret["fail"] = True + return ret + + cmd = ["md5sum", input_file] + stdout = run_command(cmd).get("out") + if stdout: + md5sum = stdout.split()[0] + else: + md5sum = "" + + cmd = [ + settings.FFPROBE_COMMAND, + "-loglevel", + "error", + "-show_streams", + "-show_entries", + "format=format_name", + "-of", + "json", + input_file, + ] + stdout = run_command(cmd).get("out") + try: + info = json.loads(stdout) + except TypeError: + ret["fail"] = True + return ret + + has_video = False + has_audio = False + for stream_info in info["streams"]: + if stream_info["codec_type"] == "video": + video_info = stream_info + has_video = True + if info.get("format") and info["format"].get("format_name", "") in [ + "tty", + "image2", + "image2pipe", + "bin", + "png_pipe", + "gif", + ]: + ret["fail"] = True + return ret + elif stream_info["codec_type"] == "audio": + audio_info = stream_info + has_audio = True + + if not has_video: + ret["is_video"] = False + ret["is_audio"] = has_audio + ret["audio_info"] = audio_info + return ret + + if "duration" in video_info.keys(): + video_duration = float(video_info["duration"]) + elif "tags" in video_info.keys() and "DURATION" in video_info["tags"]: + duration_str = video_info["tags"]["DURATION"] + try: + hms, msec = duration_str.split(".") + except ValueError: + hms, msec = duration_str.split(",") + + total_dur = sum( + int(x) * 60 ** i for i, x in enumerate(reversed(hms.split(":"))) + ) + video_duration = total_dur + float("0." + msec) + else: + # fallback to format, eg for webm + cmd = [ + settings.FFPROBE_COMMAND, + "-loglevel", + "error", + "-show_format", + "-of", + "json", + input_file, + ] + stdout = run_command(cmd).get("out") + format_info = json.loads(stdout)["format"] + try: + video_duration = float(format_info["duration"]) + except KeyError: + ret["fail"] = True + return ret + + if "bit_rate" in video_info.keys(): + video_bitrate = round(float(video_info["bit_rate"]) / 1024.0, 2) + else: + cmd = [ + settings.FFPROBE_COMMAND, + "-loglevel", + "error", + "-select_streams", + "v", + "-show_entries", + "packet=size", + "-of", + "compact=p=0:nk=1", + input_file, + ] + stdout = run_command(cmd).get("out") + stream_size = sum([int(l) for l in stdout.split("\n") if l != ""]) + video_bitrate = round((stream_size * 8 / 1024.0) / video_duration, 2) + + ret = { + "filename": input_file, + "file_size": file_size, + "video_duration": video_duration, + "video_frame_rate": float(Fraction(video_info["r_frame_rate"])), + "video_bitrate": video_bitrate, + "video_width": video_info["width"], + "video_height": video_info["height"], + "video_codec": video_info["codec_name"], + "has_video": has_video, + "has_audio": has_audio, + } + + if has_audio: + audio_duration = 1 + if "duration" in audio_info.keys(): + audio_duration = float(audio_info["duration"]) + elif "tags" in audio_info.keys() and "DURATION" in audio_info["tags"]: + duration_str = audio_info["tags"]["DURATION"] + try: + hms, msec = duration_str.split(".") + except ValueError: + hms, msec = duration_str.split(",") + total_dur = sum( + int(x) * 60 ** i for i, x in enumerate(reversed(hms.split(":"))) + ) + audio_duration = total_dur + float("0." 
+ msec) + else: + # fallback to format, eg for webm + cmd = [ + settings.FFPROBE_COMMAND, + "-loglevel", + "error", + "-show_format", + "-of", + "json", + input_file, + ] + stdout = run_command(cmd).get("out") + format_info = json.loads(stdout)["format"] + audio_duration = float(format_info["duration"]) + + if "bit_rate" in audio_info.keys(): + audio_bitrate = round(float(audio_info["bit_rate"]) / 1024.0, 2) + else: + # fall back to calculating from accumulated frame duration + cmd = [ + settings.FFPROBE_COMMAND, + "-loglevel", + "error", + "-select_streams", + "a", + "-show_entries", + "packet=size", + "-of", + "compact=p=0:nk=1", + input_file, + ] + stdout = run_command(cmd).get("out") + stream_size = sum([int(l) for l in stdout.split("\n") if l != ""]) + audio_bitrate = round((stream_size * 8 / 1024.0) / audio_duration, 2) + + ret.update( + { + "audio_duration": audio_duration, + "audio_sample_rate": audio_info["sample_rate"], + "audio_codec": audio_info["codec_name"], + "audio_bitrate": audio_bitrate, + "audio_channels": audio_info["channels"], + } + ) + + ret["video_info"] = video_info + ret["audio_info"] = audio_info + ret["is_video"] = True + ret["md5sum"] = md5sum + return ret + + +def calculate_seconds(duration): + # returns seconds, given a ffmpeg extracted string + ret = 0 + if isinstance(duration, str): + duration = duration.split(":") + if len(duration) != 3: + return ret + else: + return ret + + ret += int(float(duration[2])) + ret += int(float(duration[1])) * 60 + ret += int(float(duration[0])) * 60 * 60 + return ret + + +def show_file_size(size): + if size: + size = size / 1000000 + size = round(size, 1) + size = "{0}MB".format(str(size)) + return size + + +def get_base_ffmpeg_command( + input_file, + output_file, + has_audio, + codec, + encoder, + audio_encoder, + target_fps, + target_height, + target_rate, + target_rate_audio, + pass_file, + pass_number, + enc_type, + chunk, +): + """Get the base command for a specific codec, height/rate, and pass + + Arguments: + input_file {str} -- input file name + output_file {str} -- output file name + has_audio {bool} -- does the input have audio? + codec {str} -- video codec + encoder {str} -- video encoder + audio_encoder {str} -- audio encoder + target_fps {int} -- target FPS + target_height {int} -- height + target_rate {int} -- target bitrate in kbps + target_rate_audio {int} -- audio target bitrate + pass_file {str} -- path to temp pass file + pass_number {int} -- number of passes + enc_type {str} -- encoding type (twopass or crf) + """ + + target_fps = int(target_fps) + # avoid Frame rate very high for a muxer not efficiently supporting it. 
+ if target_fps > 90: + target_fps = 90 + + base_cmd = [ + settings.FFMPEG_COMMAND, + "-y", + "-i", + input_file, + "-c:v", + encoder, + "-filter:v", + "scale=-2:" + str(target_height) + ",fps=fps=" + str(target_fps), + # always convert to 4:2:0 -- FIXME: this could be also 4:2:2 + # but compatibility will suffer + "-pix_fmt", + "yuv420p", + ] + + if enc_type == "twopass": + base_cmd.extend(["-b:v", str(target_rate) + "k"]) + elif enc_type == "crf": + base_cmd.extend(["-crf", str(VIDEO_CRFS[codec])]) + if encoder == "libvpx-vp9": + base_cmd.extend(["-b:v", str(target_rate) + "k"]) + + if has_audio: + base_cmd.extend( + [ + "-c:a", + audio_encoder, + "-b:a", + str(target_rate_audio) + "k", + # stereo audio only, see https://trac.ffmpeg.org/ticket/5718 + "-ac", + "2", + ] + ) + + # get keyframe distance in frames + keyframe_distance = int(target_fps * KEYFRAME_DISTANCE) + + # start building the command + cmd = base_cmd[:] + + # preset settings + if encoder == "libvpx-vp9": + if pass_number == 1: + speed = 4 + else: + speed = VP9_SPEED + elif encoder in ["libx264"]: + preset = X26x_PRESET + elif encoder in ["libx265"]: + preset = X265_PRESET + if target_height >= 720: + preset = X26x_PRESET_BIG_HEIGHT + + if encoder == "libx264": + level = "4.2" if target_height <= 1080 else "5.2" + + x264_params = [ + "keyint=" + str(keyframe_distance * 2), + "keyint_min=" + str(keyframe_distance), + ] + + cmd.extend( + [ + "-maxrate", + str(int(int(target_rate) * MAX_RATE_MULTIPLIER)) + "k", + "-bufsize", + str(int(int(target_rate) * BUF_SIZE_MULTIPLIER)) + "k", + "-force_key_frames", + "expr:gte(t,n_forced*" + str(KEYFRAME_DISTANCE) + ")", + "-x264-params", + ":".join(x264_params), + "-preset", + preset, + "-profile:v", + VIDEO_PROFILES[codec], + "-level", + level, + ] + ) + + if enc_type == "twopass": + cmd.extend(["-passlogfile", pass_file, "-pass", pass_number]) + + elif encoder == "libx265": + x265_params = [ + "vbv-maxrate=" + str(int(int(target_rate) * MAX_RATE_MULTIPLIER)), + "vbv-bufsize=" + str(int(int(target_rate) * BUF_SIZE_MULTIPLIER)), + "keyint=" + str(keyframe_distance * 2), + "keyint_min=" + str(keyframe_distance), + ] + + if enc_type == "twopass": + x265_params.extend(["stats=" + str(pass_file), "pass=" + str(pass_number)]) + + cmd.extend( + [ + "-force_key_frames", + "expr:gte(t,n_forced*" + str(KEYFRAME_DISTANCE) + ")", + "-x265-params", + ":".join(x265_params), + "-preset", + preset, + "-profile:v", + VIDEO_PROFILES[codec], + ] + ) + elif encoder == "libvpx-vp9": + cmd.extend( + [ + "-g", + str(keyframe_distance), + "-keyint_min", + str(keyframe_distance), + "-maxrate", + str(int(int(target_rate) * MAX_RATE_MULTIPLIER)) + "k", + "-bufsize", + str(int(int(target_rate) * BUF_SIZE_MULTIPLIER)) + "k", + "-speed", + speed, + # '-deadline', 'realtime', + ] + ) + + if enc_type == "twopass": + cmd.extend(["-passlogfile", pass_file, "-pass", pass_number]) + + cmd.extend( + [ + "-strict", + "-2", + ] + ) + + # end of the command + if pass_number == 1: + cmd.extend(["-an", "-f", "null", "/dev/null"]) + elif pass_number == 2: + if output_file.endswith("mp4") and chunk: + cmd.extend(["-movflags", "+faststart"]) + cmd.extend([output_file]) + + return cmd + + +def produce_ffmpeg_commands( + media_file, media_info, resolution, codec, output_filename, pass_file, chunk=False +): + try: + media_info = json.loads(media_info) + except BaseException: + media_info = {} + + if codec == "h264": + encoder = "libx264" + ext = "mp4" + elif codec in ["h265", "hevc"]: + encoder = "libx265" + ext = "mp4" + elif codec 
== "vp9": + encoder = "libvpx-vp9" + ext = "webm" + else: + return False + + src_framerate = media_info.get("video_frame_rate", 30) + if src_framerate <= 30: + target_rate = VIDEO_BITRATES[codec][25].get(resolution) + else: + target_rate = VIDEO_BITRATES[codec][60].get(resolution) + if not target_rate: # INVESTIGATE MORE! + target_rate = VIDEO_BITRATES[codec][25].get(resolution) + if not target_rate: + return False + + if media_info.get("video_height") < resolution: + if resolution not in [240, 360]: # always get these two + return False + + # if codec == "h264_baseline": + # target_fps = 25 + # else: + + # adjust the target frame rate if the input is fractional + target_fps = ( + src_framerate if isinstance(src_framerate, int) else math.ceil(src_framerate) + ) + + if media_info.get("video_duration") > CRF_ENCODING_NUM_SECONDS: + enc_type = "crf" + else: + enc_type = "twopass" + + if enc_type == "twopass": + passes = [1, 2] + elif enc_type == "crf": + passes = [2] + + cmds = [] + for pass_number in passes: + cmds.append( + get_base_ffmpeg_command( + media_file, + output_file=output_filename, + has_audio=media_info.get("has_audio"), + codec=codec, + encoder=encoder, + audio_encoder=AUDIO_ENCODERS[codec], + target_fps=target_fps, + target_height=resolution, + target_rate=target_rate, + target_rate_audio=AUDIO_BITRATES[codec], + pass_file=pass_file, + pass_number=pass_number, + enc_type=enc_type, + chunk=chunk, + ) + ) + return cmds + + +def clean_query(query): + """This is used to clear text in order to comply with SearchQuery + known exception cases + + :param query: str - the query text that we want to clean + :return: + """ + + if not query: + return "" + + chars = ["^", "{", "}", "&", "|", "<", ">", '"', ")", "(", "!", ":", ";", "'", "#"] + for char in chars: + query = query.replace(char, "") + + return query.lower() diff --git a/files/management_views.py b/files/management_views.py new file mode 100644 index 0000000..4cc36dd --- /dev/null +++ b/files/management_views.py @@ -0,0 +1,195 @@ +from rest_framework.views import APIView +from rest_framework.parsers import JSONParser +from rest_framework.settings import api_settings +from rest_framework.response import Response +from rest_framework import status + +from users.models import User +from users.serializers import UserSerializer +from .permissions import IsMediacmsEditor +from .models import Media, Comment +from .methods import is_mediacms_manager + +from .serializers import MediaSerializer, CommentSerializer + + +class MediaList(APIView): + """Media listings + Used on management pages of MediaCMS + Should be available only to MediaCMS editors, + managers and admins + """ + + permission_classes = (IsMediacmsEditor,) + parser_classes = (JSONParser,) + + def get(self, request, format=None): + params = self.request.query_params + ordering = params.get("ordering", "").strip() + sort_by = params.get("sort_by", "").strip() + state = params.get("state", "").strip() + encoding_status = params.get("encoding_status", "").strip() + media_type = params.get("media_type", "").strip() + + featured = params.get("featured", "").strip() + is_reviewed = params.get("is_reviewed", "").strip() + + sort_by_options = [ + "title", + "add_date", + "edit_date", + "views", + "likes", + "reported_times", + ] + if sort_by not in sort_by_options: + sort_by = "add_date" + if ordering == "asc": + ordering = "" + else: + ordering = "-" + + if media_type not in ["video", "image", "audio", "pdf"]: + media_type = None + + if state not in ["private", "public", 
"unlisted"]: + state = None + + if encoding_status not in ["pending", "running", "fail", "success"]: + encoding_status = None + + if featured == "true": + featured = True + elif featured == "false": + featured = False + else: + featured = "all" + if is_reviewed == "true": + is_reviewed = True + elif is_reviewed == "false": + is_reviewed = False + else: + is_reviewed = "all" + + pagination_class = api_settings.DEFAULT_PAGINATION_CLASS + qs = Media.objects.filter() + if state: + qs = qs.filter(state=state) + if encoding_status: + qs = qs.filter(encoding_status=encoding_status) + if media_type: + qs = qs.filter(media_type=media_type) + + if featured != "all": + qs = qs.filter(featured=featured) + if is_reviewed != "all": + qs = qs.filter(is_reviewed=is_reviewed) + + media = qs.order_by(f"{ordering}{sort_by}") + + paginator = pagination_class() + + page = paginator.paginate_queryset(media, request) + + serializer = MediaSerializer(page, many=True, context={"request": request}) + return paginator.get_paginated_response(serializer.data) + + def delete(self, request, format=None): + tokens = request.GET.get("tokens") + if tokens: + tokens = tokens.split(",") + Media.objects.filter(friendly_token__in=tokens).delete() + return Response(status=status.HTTP_204_NO_CONTENT) + + +class CommentList(APIView): + """Comments listings + Used on management pages of MediaCMS + Should be available only to MediaCMS editors, + managers and admins + """ + + permission_classes = (IsMediacmsEditor,) + parser_classes = (JSONParser,) + + def get(self, request, format=None): + params = self.request.query_params + ordering = params.get("ordering", "").strip() + sort_by = params.get("sort_by", "").strip() + + sort_by_options = ["text", "add_date"] + if sort_by not in sort_by_options: + sort_by = "add_date" + if ordering == "asc": + ordering = "" + else: + ordering = "-" + + pagination_class = api_settings.DEFAULT_PAGINATION_CLASS + + qs = Comment.objects.filter() + media = qs.order_by(f"{ordering}{sort_by}") + + paginator = pagination_class() + + page = paginator.paginate_queryset(media, request) + + serializer = CommentSerializer(page, many=True, context={"request": request}) + return paginator.get_paginated_response(serializer.data) + + + def delete(self, request, format=None): + comment_ids = request.GET.get('comment_ids') + if comment_ids: + comments = comment_ids.split(',') + Comment.objects.filter(uid__in=comments).delete() + return Response(status=status.HTTP_204_NO_CONTENT) + + +class UserList(APIView): + """Users listings + Used on management pages of MediaCMS + Should be available only to MediaCMS editors, + managers and admins. Delete should be option + for managers+admins only. 
+ """ + + permission_classes = (IsMediacmsEditor,) + parser_classes = (JSONParser,) + + def get(self, request, format=None): + params = self.request.query_params + ordering = params.get("ordering", "").strip() + sort_by = params.get("sort_by", "").strip() + + sort_by_options = ["date_added", "name"] + if sort_by not in sort_by_options: + sort_by = "date_added" + if ordering == "asc": + ordering = "" + else: + ordering = "-" + + pagination_class = api_settings.DEFAULT_PAGINATION_CLASS + + qs = User.objects.filter() + media = qs.order_by(f"{ordering}{sort_by}") + + paginator = pagination_class() + + page = paginator.paginate_queryset(media, request) + + serializer = UserSerializer(page, many=True, context={"request": request}) + return paginator.get_paginated_response(serializer.data) + + def delete(self, request, format=None): + if not is_mediacms_manager(request.user): + return Response( + {"detail": "bad permissions"}, status=status.HTTP_400_BAD_REQUEST + ) + + tokens = request.GET.get("tokens") + if tokens: + tokens = tokens.split(",") + User.objects.filter(username__in=tokens).delete() + return Response(status=status.HTTP_204_NO_CONTENT) diff --git a/files/methods.py b/files/methods.py new file mode 100644 index 0000000..2bd81f0 --- /dev/null +++ b/files/methods.py @@ -0,0 +1,437 @@ +# Kudos to Werner Robitza, AVEQ GmbH, for helping with ffmpeg +# related content + +import logging +import random +import itertools +from datetime import datetime +from cms import celery_app +from django.conf import settings +from django.core.cache import cache +from django.db.models import Q +from django.core.mail import EmailMessage + +from . import models +from .helpers import mask_ip + +logger = logging.getLogger(__name__) + + +def get_user_or_session(request): + """Return a dictionary with user info + whether user is authenticated or not + this is used in action calculations, example for + increasing the watch counter of a media + """ + + ret = {} + if request.user.is_authenticated: + ret["user_id"] = request.user.id + else: + if not request.session.session_key: + request.session.save() + ret["user_session"] = request.session.session_key + if settings.MASK_IPS_FOR_ACTIONS: + ret["remote_ip_addr"] = mask_ip(request.META.get("REMOTE_ADDR")) + else: + ret["remote_ip_addr"] = request.META.get("REMOTE_ADDR") + return ret + + +def pre_save_action(media, user, session_key, action, remote_ip): + """This will perform some checkes + example threshold checks, before performing an action + """ + + from actions.models import MediaAction + + if user: + query = MediaAction.objects.filter(media=media, action=action, user=user) + else: + query = MediaAction.objects.filter( + media=media, action=action, session_key=session_key + ) + query = query.order_by("-action_date") + + if query: + query = query.first() + if action in ["like", "dislike", "report"]: + return False # has alread done action once + elif action == "watch" and user: + # increase the number of times a media is viewed + if media.duration: + now = datetime.now(query.action_date.tzinfo) + if (now - query.action_date).seconds > media.duration: + return True + else: + if user: # first time action + return True + + if not user: + # perform some checking for requests where no session + # id is specified (and user is anonymous) to avoid spam + # eg allow for the same remote_ip for a specific number of actions + query = ( + MediaAction.objects.filter(media=media, action=action, remote_ip=remote_ip) + .filter(user=None) + .order_by("-action_date") + ) + if query: + 
query = query.first() + now = datetime.now(query.action_date.tzinfo) + if action == "watch": + if not (now - query.action_date).seconds > media.duration: + return False + if (now - query.action_date).seconds > settings.TIME_TO_ACTION_ANONYMOUS: + return True + else: + return True + + return False + + +def is_mediacms_editor(user): + """Whether user is MediaCMS editor""" + + editor = False + try: + if user.is_superuser or user.is_manager or user.is_editor: + editor = True + except BaseException: + pass + return editor + + +def is_mediacms_manager(user): + """Whether user is MediaCMS manager""" + + manager = False + try: + if user.is_superuser or user.is_manager: + manager = True + except BaseException: + pass + return manager + + +def get_next_state(user, current_state, next_state): + """Return valid state, given a current and next state + and the user object. + Users may themselves perform only allowed transitions + """ + + if next_state not in ["public", "private", "unlisted"]: + next_state = settings.PORTAL_WORKFLOW # get default state + if is_mediacms_editor(user): + # allow any transition + return next_state + + if settings.PORTAL_WORKFLOW == "private": + next_state = "private" + + if settings.PORTAL_WORKFLOW == "unlisted": + # don't allow to make media public in this case + if next_state == "public": + next_state = current_state + + return next_state + + +def notify_users(friendly_token=None, action=None, extra=None): + """Notify users through email, for a set of actions""" + + notify_items = [] + media = None + if friendly_token: + media = models.Media.objects.filter(friendly_token=friendly_token).first() + if not media: + return False + media_url = settings.SSL_FRONTEND_HOST + media.get_absolute_url() + + if action == "media_reported" and media: + if settings.ADMINS_NOTIFICATIONS.get("MEDIA_REPORTED", False): + title = "[{}] - Media was reported".format(settings.PORTAL_NAME) + msg = """ +Media %s was reported. +Reason: %s\n +Total times this media has been reported: %s + """ % ( + media_url, + extra, + media.reported_times, + ) + d = {} + d["title"] = title + d["msg"] = msg + d["to"] = settings.ADMIN_EMAIL_LIST + notify_items.append(d) + + if action == "media_added" and media: + if settings.ADMINS_NOTIFICATIONS.get("MEDIA_ADDED", False): + title = "[{}] - Media was added".format(settings.PORTAL_NAME) + msg = """ +Media %s was added by user %s. +""" % ( + media_url, + media.user, + ) + d = {} + d["title"] = title + d["msg"] = msg + d["to"] = settings.ADMIN_EMAIL_LIST + notify_items.append(d) + if settings.USERS_NOTIFICATIONS.get("MEDIA_ADDED", False): + title = "[{}] - Your media was added".format(settings.PORTAL_NAME) + msg = """ +Your media has been added! It will be encoded and will be available soon. 
+URL: %s + """ % ( + media_url + ) + d = {} + d["title"] = title + d["msg"] = msg + d["to"] = [media.user.email] + notify_items.append(d) + + for item in notify_items: + email = EmailMessage( + item["title"], item["msg"], settings.DEFAULT_FROM_EMAIL, item["to"] + ) + email.send(fail_silently=True) + return True + + +def show_recommended_media(request, limit=100): + """Return a list of recommended media + used on the index page + """ + + basic_query = Q(listable=True) + pmi = cache.get("popular_media_ids") + # produced by task get_list_of_popular_media and cached + if pmi: + media = list( + models.Media.objects.filter(friendly_token__in=pmi) + .filter(basic_query) + .prefetch_related("user")[:limit] + ) + else: + media = list( + models.Media.objects.filter(basic_query) + .order_by("-views", "-likes") + .prefetch_related("user")[:limit] + ) + random.shuffle(media) + return media + + +def show_related_media(media, request=None, limit=100): + """Return a list of related media""" + + if settings.RELATED_MEDIA_STRATEGY == "calculated": + return show_related_media_calculated(media, request, limit) + elif settings.RELATED_MEDIA_STRATEGY == "author": + return show_related_media_author(media, request, limit) + + return show_related_media_content(media, request, limit) + + +def show_related_media_content(media, request, limit): + """Return a list of related media based on simple calculations""" + + # Create list with author items + # then items on same category, then some random(latest) + # Aim is to always show enough (limit) videos + # and include author videos in any case + + q_author = Q(listable=True, user=media.user) + m = list( + models.Media.objects.filter(q_author) + .order_by() + .prefetch_related("user")[:limit] + ) + + # order by random criteria so that it doesn't bring the same results + # attention: only fields that are indexed make sense here! also need + # find a way for indexes with more than 1 field + order_criteria = [ + "-views", + "views", + "add_date", + "-add_date", + "featured", + "-featured", + "user_featured", + "-user_featured", + ] + # TODO: MAke this mess more readable, and add TAGS support - aka related + # tags rather than random media + if len(m) < limit: + category = media.category.first() + if category: + q_category = Q(listable=True, category=category) + q_res = ( + models.Media.objects.filter(q_category) + .order_by(order_criteria[random.randint(0, len(order_criteria) - 1)]) + .prefetch_related("user")[: limit - media.user.media_count] + ) + m = list(itertools.chain(m, q_res)) + + if len(m) < limit: + q_generic = Q(listable=True) + q_res = ( + models.Media.objects.filter(q_generic) + .order_by(order_criteria[random.randint(0, len(order_criteria) - 1)]) + .prefetch_related("user")[: limit - media.user.media_count] + ) + m = list(itertools.chain(m, q_res)) + + m = list(set(m[:limit])) # remove duplicates + + try: + m.remove(media) # remove media from results + except ValueError: + pass + + random.shuffle(m) + return m + + +def show_related_media_author(media, request, limit): + """Return a list of related media form the same author""" + + q_author = Q(listable=True, user=media.user) + m = list( + models.Media.objects.filter(q_author) + .order_by() + .prefetch_related("user")[:limit] + ) + + # order by random criteria so that it doesn't bring the same results + # attention: only fields that are indexed make sense here! 
also need + # find a way for indexes with more than 1 field + + m = list(set(m[:limit])) # remove duplicates + + try: + m.remove(media) # remove media from results + except ValueError: + pass + + random.shuffle(m) + return m + + +def show_related_media_calculated(media, request, limit): + + """Return a list of related media based on ML recommendations + A big todo! + """ + + return [] + + +def update_user_ratings(user, media, user_ratings): + """Populate user ratings for a media""" + + for rating in user_ratings: + user_rating = ( + models.Rating.objects.filter( + user=user, media_id=media, rating_category_id=rating.get("category_id") + ) + .only("score") + .first() + ) + if user_rating: + rating["score"] = user_rating.score + return user_ratings + + +def notify_user_on_comment(friendly_token): + """Notify users through email, for a set of actions""" + + media = None + media = models.Media.objects.filter(friendly_token=friendly_token).first() + if not media: + return False + + user = media.user + media_url = settings.SSL_FRONTEND_HOST + media.get_absolute_url() + + if user.notification_on_comments: + title = "[{}] - A comment was added".format(settings.PORTAL_NAME) + msg = """ +A comment has been added to your media %s . +View it on %s + """ % ( + media.title, + media_url, + ) + email = EmailMessage( + title, msg, settings.DEFAULT_FROM_EMAIL, [media.user.email] + ) + email.send(fail_silently=True) + return True + + +def list_tasks(): + """Lists celery tasks + To be used in an admin dashboard + """ + + i = celery_app.control.inspect([]) + ret = {} + temp = {} + task_ids = [] + media_profile_pairs = [] + + temp["active"] = i.active() + temp["reserved"] = i.reserved() + temp["scheduled"] = i.scheduled() + + for state, state_dict in temp.items(): + ret[state] = {} + ret[state]["tasks"] = [] + for worker, worker_dict in state_dict.items(): + for task in worker_dict: + task_dict = {} + task_dict["worker"] = worker + task_dict["task_id"] = task.get("id") + task_ids.append(task.get("id")) + task_dict["args"] = task.get("args") + task_dict["name"] = task.get("name") + task_dict["time_start"] = task.get("time_start") + if task.get("name") == "encode_media": + task_args = task.get("args") + for bad in "(),'": + task_args = task_args.replace(bad, "") + friendly_token = task_args.split()[0] + profile_id = task_args.split()[1] + + media = models.Media.objects.filter( + friendly_token=friendly_token + ).first() + if media: + profile = models.EncodeProfile.objects.filter( + id=profile_id + ).first() + if profile: + media_profile_pairs.append( + (media.friendly_token, profile.id) + ) + task_dict["info"] = {} + task_dict["info"]["profile name"] = profile.name + task_dict["info"]["media title"] = media.title + encoding = models.Encoding.objects.filter( + task_id=task.get("id") + ).first() + if encoding: + task_dict["info"][ + "encoding progress" + ] = encoding.progress + + ret[state]["tasks"].append(task_dict) + ret["task_ids"] = task_ids + ret["media_profile_pairs"] = media_profile_pairs + return ret diff --git a/files/migrations/0001_initial.py b/files/migrations/0001_initial.py new file mode 100644 index 0000000..306d9f4 --- /dev/null +++ b/files/migrations/0001_initial.py @@ -0,0 +1,637 @@ +# Generated by Django 3.1.4 on 2020-12-01 07:12 + +import django.contrib.postgres.search +from django.db import migrations, models +import files.models +import imagekit.models.fields +import uuid + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [] + + operations = [ + 
migrations.CreateModel( + name="Category", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("uid", models.UUIDField(default=uuid.uuid4, unique=True)), + ("add_date", models.DateTimeField(auto_now_add=True)), + ("title", models.CharField(db_index=True, max_length=100, unique=True)), + ("description", models.TextField(blank=True)), + ( + "is_global", + models.BooleanField( + default=False, help_text="global categories or user specific" + ), + ), + ( + "media_count", + models.IntegerField(default=0, help_text="number of media"), + ), + ( + "thumbnail", + imagekit.models.fields.ProcessedImageField( + blank=True, upload_to=files.models.category_thumb_path + ), + ), + ( + "listings_thumbnail", + models.CharField( + blank=True, + help_text="Thumbnail to show on listings", + max_length=400, + null=True, + ), + ), + ], + options={ + "verbose_name_plural": "Categories", + "ordering": ["title"], + }, + ), + migrations.CreateModel( + name="Comment", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("add_date", models.DateTimeField(auto_now_add=True)), + ("text", models.TextField(help_text="text")), + ("uid", models.UUIDField(default=uuid.uuid4, unique=True)), + ("lft", models.PositiveIntegerField(editable=False)), + ("rght", models.PositiveIntegerField(editable=False)), + ("tree_id", models.PositiveIntegerField(db_index=True, editable=False)), + ("level", models.PositiveIntegerField(editable=False)), + ], + options={ + "abstract": False, + }, + ), + migrations.CreateModel( + name="EncodeProfile", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("name", models.CharField(max_length=90)), + ( + "extension", + models.CharField( + choices=[("mp4", "mp4"), ("webm", "webm"), ("gif", "gif")], + max_length=10, + ), + ), + ( + "resolution", + models.IntegerField( + blank=True, + choices=[ + (2160, "2160"), + (1440, "1440"), + (1080, "1080"), + (720, "720"), + (480, "480"), + (360, "360"), + (240, "240"), + ], + null=True, + ), + ), + ( + "codec", + models.CharField( + blank=True, + choices=[("h265", "h265"), ("h264", "h264"), ("vp9", "vp9")], + max_length=10, + null=True, + ), + ), + ("description", models.TextField(blank=True, help_text="description")), + ("active", models.BooleanField(default=True)), + ], + options={ + "ordering": ["resolution"], + }, + ), + migrations.CreateModel( + name="Encoding", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("add_date", models.DateTimeField(auto_now_add=True)), + ("commands", models.TextField(blank=True, help_text="commands run")), + ( + "chunk", + models.BooleanField( + db_index=True, default=False, help_text="is chunk?" 
+ ), + ), + ("chunk_file_path", models.CharField(blank=True, max_length=400)), + ("chunks_info", models.TextField(blank=True)), + ("logs", models.TextField(blank=True)), + ("md5sum", models.CharField(blank=True, max_length=50, null=True)), + ( + "media_file", + models.FileField( + blank=True, + max_length=500, + upload_to=files.models.encoding_media_file_path, + verbose_name="encoding file", + ), + ), + ("progress", models.PositiveSmallIntegerField(default=0)), + ("update_date", models.DateTimeField(auto_now=True)), + ("retries", models.IntegerField(default=0)), + ("size", models.CharField(blank=True, max_length=20)), + ( + "status", + models.CharField( + choices=[ + ("pending", "Pending"), + ("running", "Running"), + ("fail", "Fail"), + ("success", "Success"), + ], + default="pending", + max_length=20, + ), + ), + ("temp_file", models.CharField(blank=True, max_length=400)), + ("task_id", models.CharField(blank=True, max_length=100)), + ("total_run_time", models.IntegerField(default=0)), + ("worker", models.CharField(blank=True, max_length=100)), + ], + ), + migrations.CreateModel( + name="Language", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("code", models.CharField(help_text="language code", max_length=12)), + ("title", models.CharField(help_text="language code", max_length=100)), + ], + options={ + "ordering": ["id"], + }, + ), + migrations.CreateModel( + name="License", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("title", models.CharField(max_length=100, unique=True)), + ("description", models.TextField(blank=True)), + ], + ), + migrations.CreateModel( + name="Media", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "add_date", + models.DateTimeField( + blank=True, + db_index=True, + null=True, + verbose_name="Date produced", + ), + ), + ( + "allow_download", + models.BooleanField( + default=True, + help_text="Whether option to download media is shown", + ), + ), + ("description", models.TextField(blank=True)), + ("dislikes", models.IntegerField(default=0)), + ("duration", models.IntegerField(default=0)), + ("edit_date", models.DateTimeField(auto_now=True)), + ( + "enable_comments", + models.BooleanField( + default=True, + help_text="Whether comments will be allowed for this media", + ), + ), + ( + "encoding_status", + models.CharField( + choices=[ + ("pending", "Pending"), + ("running", "Running"), + ("fail", "Fail"), + ("success", "Success"), + ], + db_index=True, + default="pending", + max_length=20, + ), + ), + ( + "featured", + models.BooleanField( + db_index=True, + default=False, + help_text="Whether media is globally featured by a MediaCMS editor", + ), + ), + ( + "friendly_token", + models.CharField( + blank=True, + db_index=True, + help_text="Identifier for the Media", + max_length=12, + ), + ), + ( + "hls_file", + models.CharField( + blank=True, + help_text="Path to HLS file for videos", + max_length=1000, + ), + ), + ( + "is_reviewed", + models.BooleanField( + db_index=True, + default=True, + help_text="Whether media is reviewed, so it can appear on public listings", + ), + ), + ("likes", models.IntegerField(db_index=True, default=1)), + ( + "listable", + models.BooleanField( + default=False, help_text="Whether it will appear on listings" + ), + ), + ( + "md5sum", + models.CharField( + blank=True, + 
help_text="Not exposed, used internally", + max_length=50, + null=True, + ), + ), + ( + "media_file", + models.FileField( + help_text="media file", + max_length=500, + upload_to=files.models.original_media_file_path, + verbose_name="media file", + ), + ), + ( + "media_info", + models.TextField( + blank=True, help_text="extracted media metadata info" + ), + ), + ( + "media_type", + models.CharField( + blank=True, + choices=[ + ("video", "Video"), + ("image", "Image"), + ("pdf", "Pdf"), + ("audio", "Audio"), + ], + db_index=True, + default="video", + max_length=20, + ), + ), + ( + "password", + models.CharField( + blank=True, + help_text="password for private media", + max_length=100, + ), + ), + ( + "preview_file_path", + models.CharField( + blank=True, + help_text="preview gif for videos, path in filesystem", + max_length=500, + ), + ), + ( + "poster", + imagekit.models.fields.ProcessedImageField( + blank=True, + help_text="media extracted big thumbnail, shown on media page", + max_length=500, + upload_to=files.models.original_thumbnail_file_path, + ), + ), + ( + "reported_times", + models.IntegerField( + default=0, help_text="how many time a Medis is reported" + ), + ), + ( + "search", + django.contrib.postgres.search.SearchVectorField( + help_text="used to store all searchable info and metadata for a Media", + null=True, + ), + ), + ( + "size", + models.CharField( + blank=True, + help_text="media size in bytes, automatically calculated", + max_length=20, + null=True, + ), + ), + ( + "sprites", + models.FileField( + blank=True, + help_text="sprites file, only for videos, displayed on the video player", + max_length=500, + upload_to=files.models.original_thumbnail_file_path, + ), + ), + ( + "state", + models.CharField( + choices=[ + ("private", "Private"), + ("public", "Public"), + ("unlisted", "Unlisted"), + ], + db_index=True, + default="public", + help_text="state of Media", + max_length=20, + ), + ), + ( + "title", + models.CharField( + blank=True, + db_index=True, + help_text="media title", + max_length=100, + ), + ), + ( + "thumbnail", + imagekit.models.fields.ProcessedImageField( + blank=True, + help_text="media extracted small thumbnail, shown on listings", + max_length=500, + upload_to=files.models.original_thumbnail_file_path, + ), + ), + ( + "thumbnail_time", + models.FloatField( + blank=True, + help_text="Time on video that a thumbnail will be taken", + null=True, + ), + ), + ( + "uid", + models.UUIDField( + default=uuid.uuid4, + help_text="A unique identifier for the Media", + unique=True, + ), + ), + ( + "uploaded_thumbnail", + imagekit.models.fields.ProcessedImageField( + blank=True, + help_text="thumbnail from uploaded_poster field", + max_length=500, + upload_to=files.models.original_thumbnail_file_path, + ), + ), + ( + "uploaded_poster", + imagekit.models.fields.ProcessedImageField( + blank=True, + help_text="This image will characterize the media", + max_length=500, + upload_to=files.models.original_thumbnail_file_path, + verbose_name="Upload image", + ), + ), + ( + "user_featured", + models.BooleanField( + default=False, help_text="Featured by the user" + ), + ), + ("video_height", models.IntegerField(default=1)), + ("views", models.IntegerField(db_index=True, default=1)), + ], + options={ + "ordering": ["-add_date"], + }, + ), + migrations.CreateModel( + name="Playlist", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("add_date", models.DateTimeField(auto_now_add=True, 
db_index=True)), + ("description", models.TextField(blank=True, help_text="description")), + ( + "friendly_token", + models.CharField(blank=True, db_index=True, max_length=12), + ), + ("title", models.CharField(db_index=True, max_length=100)), + ("uid", models.UUIDField(default=uuid.uuid4, unique=True)), + ], + ), + migrations.CreateModel( + name="PlaylistMedia", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("action_date", models.DateTimeField(auto_now=True)), + ("ordering", models.IntegerField(default=1)), + ], + options={ + "ordering": ["ordering", "-action_date"], + }, + ), + migrations.CreateModel( + name="Rating", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("add_date", models.DateTimeField(auto_now_add=True)), + ( + "score", + models.IntegerField(validators=[files.models.validate_rating]), + ), + ], + options={ + "verbose_name_plural": "Ratings", + }, + ), + migrations.CreateModel( + name="RatingCategory", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("description", models.TextField(blank=True)), + ("enabled", models.BooleanField(default=True)), + ("title", models.CharField(db_index=True, max_length=200, unique=True)), + ], + options={ + "verbose_name_plural": "Rating Categories", + }, + ), + migrations.CreateModel( + name="Subtitle", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "subtitle_file", + models.FileField( + help_text="File has to be WebVTT format", + max_length=500, + upload_to=files.models.subtitles_file_path, + verbose_name="Subtitle/CC file", + ), + ), + ], + ), + migrations.CreateModel( + name="Tag", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("title", models.CharField(db_index=True, max_length=100, unique=True)), + ( + "media_count", + models.IntegerField(default=0, help_text="number of media"), + ), + ( + "listings_thumbnail", + models.CharField( + blank=True, + db_index=True, + help_text="Thumbnail to show on listings", + max_length=400, + null=True, + ), + ), + ], + options={ + "ordering": ["title"], + }, + ), + ] diff --git a/files/migrations/0002_auto_20201201_0712.py b/files/migrations/0002_auto_20201201_0712.py new file mode 100644 index 0000000..6b1b0f4 --- /dev/null +++ b/files/migrations/0002_auto_20201201_0712.py @@ -0,0 +1,240 @@ +# Generated by Django 3.1.4 on 2020-12-01 07:12 + +from django.conf import settings +import django.contrib.postgres.indexes +from django.db import migrations, models +import django.db.models.deletion +import mptt.fields + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ("files", "0001_initial"), + ("users", "0001_initial"), + ] + + operations = [ + migrations.AddField( + model_name="tag", + name="user", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to=settings.AUTH_USER_MODEL, + ), + ), + migrations.AddField( + model_name="subtitle", + name="language", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="files.language" + ), + ), + migrations.AddField( + model_name="subtitle", + name="media", + 
field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="subtitles", + to="files.media", + ), + ), + migrations.AddField( + model_name="subtitle", + name="user", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL + ), + ), + migrations.AddField( + model_name="rating", + name="media", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="ratings", + to="files.media", + ), + ), + migrations.AddField( + model_name="rating", + name="rating_category", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="files.ratingcategory" + ), + ), + migrations.AddField( + model_name="rating", + name="user", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL + ), + ), + migrations.AddField( + model_name="playlistmedia", + name="media", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="files.media" + ), + ), + migrations.AddField( + model_name="playlistmedia", + name="playlist", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="files.playlist" + ), + ), + migrations.AddField( + model_name="playlist", + name="media", + field=models.ManyToManyField( + blank=True, through="files.PlaylistMedia", to="files.Media" + ), + ), + migrations.AddField( + model_name="playlist", + name="user", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="playlists", + to=settings.AUTH_USER_MODEL, + ), + ), + migrations.AddField( + model_name="media", + name="category", + field=models.ManyToManyField( + blank=True, + help_text="Media can be part of one or more categories", + to="files.Category", + ), + ), + migrations.AddField( + model_name="media", + name="channel", + field=models.ForeignKey( + blank=True, + help_text="Media can exist in one or no Channels", + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="users.channel", + ), + ), + migrations.AddField( + model_name="media", + name="license", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="files.license", + ), + ), + migrations.AddField( + model_name="media", + name="rating_category", + field=models.ManyToManyField( + blank=True, + help_text="Rating category, if media Rating is allowed", + to="files.RatingCategory", + ), + ), + migrations.AddField( + model_name="media", + name="tags", + field=models.ManyToManyField( + blank=True, + help_text="select one or more out of the existing tags", + to="files.Tag", + ), + ), + migrations.AddField( + model_name="media", + name="user", + field=models.ForeignKey( + help_text="user that uploads the media", + on_delete=django.db.models.deletion.CASCADE, + to=settings.AUTH_USER_MODEL, + ), + ), + migrations.AddField( + model_name="encoding", + name="media", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="encodings", + to="files.media", + ), + ), + migrations.AddField( + model_name="encoding", + name="profile", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="files.encodeprofile" + ), + ), + migrations.AddField( + model_name="comment", + name="media", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="comments", + to="files.media", + ), + ), + migrations.AddField( + model_name="comment", + name="parent", + field=mptt.fields.TreeForeignKey( + blank=True, + null=True, + 
on_delete=django.db.models.deletion.CASCADE, + related_name="children", + to="files.comment", + ), + ), + migrations.AddField( + model_name="comment", + name="user", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL + ), + ), + migrations.AddField( + model_name="category", + name="user", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to=settings.AUTH_USER_MODEL, + ), + ), + migrations.AddIndex( + model_name="rating", + index=models.Index( + fields=["user", "media"], name="files_ratin_user_id_72ca6a_idx" + ), + ), + migrations.AlterUniqueTogether( + name="rating", + unique_together={("user", "media", "rating_category")}, + ), + migrations.AddIndex( + model_name="media", + index=models.Index( + fields=["state", "encoding_status", "is_reviewed"], + name="files_media_state_666b93_idx", + ), + ), + migrations.AddIndex( + model_name="media", + index=django.contrib.postgres.indexes.GinIndex( + fields=["search"], name="files_media_search_7194c6_gin" + ), + ), + ] diff --git a/files/migrations/__init__.py b/files/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/files/models.py b/files/models.py new file mode 100644 index 0000000..65ec6b4 --- /dev/null +++ b/files/models.py @@ -0,0 +1,1714 @@ +import logging +import uuid +import os +import re +import tempfile +import random +import json +import m3u8 +from django.utils import timezone +from django.db import connection +from django.db import models +from django.template.defaultfilters import slugify +from django.conf import settings +from django.contrib.postgres.indexes import GinIndex +from django.db.models.signals import pre_delete, post_delete, post_save, m2m_changed +from django.core.files import File +from django.core.exceptions import ValidationError +from django.dispatch import receiver +from django.urls import reverse +from django.utils.html import strip_tags +from django.contrib.postgres.search import SearchVectorField +from mptt.models import MPTTModel, TreeForeignKey + +from imagekit.processors import ResizeToFit +from imagekit.models import ProcessedImageField + +from . 
import helpers +from .methods import notify_users +from .stop_words import STOP_WORDS + +logger = logging.getLogger(__name__) + +RE_TIMECODE = re.compile(r"(\d+:\d+:\d+.\d+)") + +# this is used by Media and Encoding models +# reflects media encoding status for objects +MEDIA_ENCODING_STATUS = ( + ("pending", "Pending"), + ("running", "Running"), + ("fail", "Fail"), + ("success", "Success"), +) + +# the media state of a Media object +# this is set by default according to the portal workflow +MEDIA_STATES = ( + ("private", "Private"), + ("public", "Public"), + ("unlisted", "Unlisted"), +) + +# each uploaded Media gets a media_type hint +# by helpers.get_file_type + +MEDIA_TYPES_SUPPORTED = ( + ("video", "Video"), + ("image", "Image"), + ("pdf", "Pdf"), + ("audio", "Audio"), +) + +ENCODE_EXTENSIONS = ( + ("mp4", "mp4"), + ("webm", "webm"), + ("gif", "gif"), +) + +ENCODE_RESOLUTIONS = ( + (2160, "2160"), + (1440, "1440"), + (1080, "1080"), + (720, "720"), + (480, "480"), + (360, "360"), + (240, "240"), +) + +CODECS = ( + ("h265", "h265"), + ("h264", "h264"), + ("vp9", "vp9"), +) + +ENCODE_EXTENSIONS_KEYS = [extension for extension, name in ENCODE_EXTENSIONS] +ENCODE_RESOLUTIONS_KEYS = [resolution for resolution, name in ENCODE_RESOLUTIONS] + + +def original_media_file_path(instance, filename): + """Helper function to place original media file""" + file_name = "{0}.{1}".format(instance.uid.hex, helpers.get_file_name(filename)) + return settings.MEDIA_UPLOAD_DIR + "user/{0}/{1}".format( + instance.user.username, file_name + ) + + +def encoding_media_file_path(instance, filename): + """Helper function to place encoded media file""" + + file_name = "{0}.{1}".format( + instance.media.uid.hex, helpers.get_file_name(filename) + ) + return settings.MEDIA_ENCODING_DIR + "{0}/{1}/{2}".format( + instance.profile.id, instance.media.user.username, file_name + ) + + +def original_thumbnail_file_path(instance, filename): + """Helper function to place original media thumbnail file""" + + return settings.THUMBNAIL_UPLOAD_DIR + "user/{0}/{1}".format( + instance.user.username, filename + ) + + +def subtitles_file_path(instance, filename): + """Helper function to place subtitle file""" + + return settings.SUBTITLES_UPLOAD_DIR + "user/{0}/{1}".format( + instance.media.user.username, filename + ) + + +def category_thumb_path(instance, filename): + """Helper function to place category thumbnail file""" + + file_name = "{0}.{1}".format(instance.uid.hex, helpers.get_file_name(filename)) + return settings.MEDIA_UPLOAD_DIR + "categories/{0}".format(file_name) + + +class Media(models.Model): + """The most important model for MediaCMS""" + + add_date = models.DateTimeField( + "Date produced", blank=True, null=True, db_index=True + ) + + allow_download = models.BooleanField( + default=True, help_text="Whether option to download media is shown" + ) + + category = models.ManyToManyField( + "Category", blank=True, help_text="Media can be part of one or more categories" + ) + + channel = models.ForeignKey( + "users.Channel", + on_delete=models.CASCADE, + blank=True, + null=True, + help_text="Media can exist in one or no Channels", + ) + + description = models.TextField(blank=True) + + dislikes = models.IntegerField(default=0) + + duration = models.IntegerField(default=0) + + edit_date = models.DateTimeField(auto_now=True) + + enable_comments = models.BooleanField( + default=True, help_text="Whether comments will be allowed for this media" + ) + + encoding_status = models.CharField( + max_length=20, 
choices=MEDIA_ENCODING_STATUS, default="pending", db_index=True + ) + + featured = models.BooleanField( + default=False, + db_index=True, + help_text="Whether media is globally featured by a MediaCMS editor", + ) + + friendly_token = models.CharField( + blank=True, max_length=12, db_index=True, help_text="Identifier for the Media" + ) + + hls_file = models.CharField( + max_length=1000, blank=True, help_text="Path to HLS file for videos" + ) + + is_reviewed = models.BooleanField( + default=settings.MEDIA_IS_REVIEWED, + db_index=True, + help_text="Whether media is reviewed, so it can appear on public listings", + ) + + license = models.ForeignKey( + "License", on_delete=models.CASCADE, db_index=True, blank=True, null=True + ) + + likes = models.IntegerField(db_index=True, default=1) + + listable = models.BooleanField( + default=False, help_text="Whether it will appear on listings" + ) + + md5sum = models.CharField( + max_length=50, blank=True, null=True, help_text="Not exposed, used internally" + ) + + media_file = models.FileField( + "media file", + upload_to=original_media_file_path, + max_length=500, + help_text="media file", + ) + + media_info = models.TextField(blank=True, help_text="extracted media metadata info") + + media_type = models.CharField( + max_length=20, + blank=True, + choices=MEDIA_TYPES_SUPPORTED, + db_index=True, + default="video", + ) + + password = models.CharField( + max_length=100, blank=True, help_text="password for private media" + ) + + preview_file_path = models.CharField( + max_length=500, + blank=True, + help_text="preview gif for videos, path in filesystem", + ) + + poster = ProcessedImageField( + upload_to=original_thumbnail_file_path, + processors=[ResizeToFit(width=720, height=None)], + format="JPEG", + options={"quality": 95}, + blank=True, + max_length=500, + help_text="media extracted big thumbnail, shown on media page", + ) + + rating_category = models.ManyToManyField( + "RatingCategory", + blank=True, + help_text="Rating category, if media Rating is allowed", + ) + + reported_times = models.IntegerField( + default=0, help_text="how many time a Medis is reported" + ) + + search = SearchVectorField( + null=True, + help_text="used to store all searchable info and metadata for a Media", + ) + + size = models.CharField( + max_length=20, + blank=True, + null=True, + help_text="media size in bytes, automatically calculated", + ) + + sprites = models.FileField( + upload_to=original_thumbnail_file_path, + blank=True, + max_length=500, + help_text="sprites file, only for videos, displayed on the video player", + ) + + state = models.CharField( + max_length=20, + choices=MEDIA_STATES, + default=helpers.get_portal_workflow(), + db_index=True, + help_text="state of Media", + ) + + tags = models.ManyToManyField( + "Tag", blank=True, help_text="select one or more out of the existing tags" + ) + + title = models.CharField( + max_length=100, help_text="media title", blank=True, db_index=True + ) + + thumbnail = ProcessedImageField( + upload_to=original_thumbnail_file_path, + processors=[ResizeToFit(width=344, height=None)], + format="JPEG", + options={"quality": 95}, + blank=True, + max_length=500, + help_text="media extracted small thumbnail, shown on listings", + ) + + thumbnail_time = models.FloatField( + blank=True, null=True, help_text="Time on video that a thumbnail will be taken" + ) + + uid = models.UUIDField( + unique=True, default=uuid.uuid4, help_text="A unique identifier for the Media" + ) + + uploaded_thumbnail = ProcessedImageField( + 
upload_to=original_thumbnail_file_path, + processors=[ResizeToFit(width=344, height=None)], + format="JPEG", + options={"quality": 85}, + blank=True, + max_length=500, + help_text="thumbnail from uploaded_poster field", + ) + + uploaded_poster = ProcessedImageField( + verbose_name="Upload image", + help_text="This image will characterize the media", + upload_to=original_thumbnail_file_path, + processors=[ResizeToFit(width=720, height=None)], + format="JPEG", + options={"quality": 85}, + blank=True, + max_length=500, + ) + + user = models.ForeignKey( + "users.User", on_delete=models.CASCADE, help_text="user that uploads the media" + ) + + user_featured = models.BooleanField(default=False, help_text="Featured by the user") + + video_height = models.IntegerField(default=1) + + views = models.IntegerField(db_index=True, default=1) + + # keep track if media file has changed, on saves + __original_media_file = None + __original_thumbnail_time = None + __original_uploaded_poster = None + + class Meta: + ordering = ["-add_date"] + indexes = [ + # TODO: check with pgdash.io or other tool what index need be + # removed + GinIndex(fields=["search"]) + ] + + def __str__(self): + return self.title + + def __init__(self, *args, **kwargs): + super(Media, self).__init__(*args, **kwargs) + # keep track if media file has changed, on saves + # thus know when another media was uploaded + # or when thumbnail time change - for videos to + # grep for thumbnail, or even when a new image + # was added as the media poster + self.__original_media_file = self.media_file + self.__original_thumbnail_time = self.thumbnail_time + self.__original_uploaded_poster = self.uploaded_poster + + def save(self, *args, **kwargs): + + if not self.title: + self.title = self.media_file.path.split("/")[-1] + + strip_text_items = ["title", "description"] + for item in strip_text_items: + setattr(self, item, strip_tags(getattr(self, item, None))) + self.title = self.title[:99] + + # if thumbnail_time specified, keep up to single digit + if self.thumbnail_time: + self.thumbnail_time = round(self.thumbnail_time, 1) + + # by default get an add_date of now + if not self.add_date: + self.add_date = timezone.now() + + if not self.friendly_token: + # get a unique identifier + while True: + friendly_token = helpers.produce_friendly_token() + if not Media.objects.filter(friendly_token=friendly_token): + self.friendly_token = friendly_token + break + + if self.pk: + # media exists + + # check case where another media file was uploaded + if self.media_file != self.__original_media_file: + # set this otherwise gets to infinite loop + self.__original_media_file = self.media_file + self.media_init() + + # for video files, if user specified a different time + # to automatically grub thumbnail + if self.thumbnail_time != self.__original_thumbnail_time: + self.__original_thumbnail_time = self.thumbnail_time + self.set_thumbnail(force=True) + else: + # media is going to be created now + # after media is saved, post_save signal will call media_init function + # to take care of post save steps + + self.state = helpers.get_default_state(user=self.user) + + # condition to appear on listings + if ( + self.state == "public" + and self.encoding_status == "success" + and self.is_reviewed == True + ): + self.listable = True + else: + self.listable = False + + super(Media, self).save(*args, **kwargs) + + # produce a thumbnail out of an uploaded poster + # will run only when a poster is uploaded for the first time + if ( + self.uploaded_poster + and 
self.uploaded_poster != self.__original_uploaded_poster + ): + with open(self.uploaded_poster.path, "rb") as f: + + # set this otherwise gets to infinite loop + self.__original_uploaded_poster = self.uploaded_poster + + myfile = File(f) + thumbnail_name = helpers.get_file_name(self.uploaded_poster.path) + self.uploaded_thumbnail.save(content=myfile, name=thumbnail_name) + + def update_search_vector(self): + """ + Update SearchVector field of SearchModel using raw SQL + search field is used to store SearchVector + """ + db_table = self._meta.db_table + + # first get anything interesting out of the media + # that needs to be search able + + a_tags = b_tags = "" + if self.id: + a_tags = " ".join([tag.title for tag in self.tags.all()]) + b_tags = " ".join([tag.title.replace("-", " ") for tag in self.tags.all()]) + + items = [ + helpers.clean_query(self.title), + self.user.username, + self.user.email, + self.user.name, + helpers.clean_query(self.description), + a_tags, + b_tags, + ] + items = [item for item in items if item] + text = " ".join(items) + text = " ".join( + [token for token in text.lower().split(" ") if token not in STOP_WORDS] + ) + + sql_code = """ + UPDATE {db_table} SET search = to_tsvector( + '{config}', '{text}' + ) WHERE {db_table}.id = {id} + """.format( + db_table=db_table, config="simple", text=text, id=self.id + ) + + try: + with connection.cursor() as cursor: + cursor.execute(sql_code) + except BaseException: + pass # TODO:add log + return True + + def media_init(self): + """Normally this is called when a media is uploaded + Performs all related tasks, as check for media type, + video duration, encode + """ + + self.set_media_type() + if self.media_type == "video": + self.set_thumbnail(force=True) + self.produce_sprite_from_video() + self.encode() + elif self.media_type == "image": + self.set_thumbnail(force=True) + return True + + def set_media_type(self, save=True): + """Sets media type on Media + Set encoding_status as success for non video + content since all listings filter for encoding_status success + """ + + kind = helpers.get_file_type(self.media_file.path) + if kind is not None: + if kind == "image": + self.media_type = "image" + elif kind == "pdf": + self.media_type = "pdf" + + if self.media_type in ["image", "pdf"]: + self.encoding_status = "success" + else: + ret = helpers.media_file_info(self.media_file.path) + if ret.get("fail"): + self.media_type = "" + self.encoding_status = "fail" + elif ret.get("is_video") or ret.get("is_audio"): + try: + self.media_info = json.dumps(ret) + except TypeError: + self.media_info = "" + self.md5sum = ret.get("md5sum") + self.size = helpers.show_file_size(ret.get("file_size")) + else: + self.media_type = "" + self.encoding_status = "fail" + + if ret.get("is_video"): + # case where Media is video. 
try to set useful + # metadata as duration/height + self.media_type = "video" + self.duration = int(round(float(ret.get("video_duration", 0)))) + self.video_height = int(ret.get("video_height")) + elif ret.get("is_audio"): + self.media_type = "audio" + self.duration = int(float(ret.get("audio_info", {}).get("duration", 0))) + self.encoding_status = "success" + + if save: + self.save( + update_fields=[ + "listable", + "media_type", + "duration", + "media_info", + "video_height", + "size", + "md5sum", + "encoding_status", + ] + ) + return True + + def set_thumbnail(self, force=False): + """sets thumbnail for media + For video call function to produce thumbnail and poster + For image save thumbnail and poster, this will perform + resize action + """ + if force or (not self.thumbnail): + if self.media_type == "video": + self.produce_thumbnails_from_video() + if self.media_type == "image": + with open(self.media_file.path, "rb") as f: + myfile = File(f) + thumbnail_name = ( + helpers.get_file_name(self.media_file.path) + ".jpg" + ) + self.thumbnail.save(content=myfile, name=thumbnail_name) + self.poster.save(content=myfile, name=thumbnail_name) + return True + + def produce_thumbnails_from_video(self): + """Produce thumbnail and poster for media + Only for video types. Uses ffmpeg + """ + if not self.media_type == "video": + return False + + if self.thumbnail_time and 0 <= self.thumbnail_time < self.duration: + thumbnail_time = self.thumbnail_time + else: + thumbnail_time = round(random.uniform(0, self.duration - 0.1), 1) + self.thumbnail_time = thumbnail_time # so that it gets saved + + tf = helpers.create_temp_file(suffix=".jpg") + command = [ + settings.FFMPEG_COMMAND, + "-ss", + str( + thumbnail_time + ), # -ss need to be firt here otherwise time taken is huge + "-i", + self.media_file.path, + "-vframes", + "1", + "-y", + tf, + ] + ret = helpers.run_command(command) + + if os.path.exists(tf) and helpers.get_file_type(tf) == "image": + with open(tf, "rb") as f: + myfile = File(f) + thumbnail_name = helpers.get_file_name(self.media_file.path) + ".jpg" + self.thumbnail.save(content=myfile, name=thumbnail_name) + self.poster.save(content=myfile, name=thumbnail_name) + helpers.rm_file(tf) + return True + + def produce_sprite_from_video(self): + """Start a task that will produce a sprite file + To be used on the video player + """ + + from . import tasks + + tasks.produce_sprite_from_video.delay(self.friendly_token) + return True + + def encode(self, profiles=[], force=True, chunkize=True): + """Start video encoding tasks + Create a task per EncodeProfile object, after checking height + so that no EncodeProfile for highter heights than the video + are created + """ + + if not profiles: + profiles = EncodeProfile.objects.filter(active=True) + profiles = list(profiles) + + from . 
import tasks + + # attempt to break media file in chunks + if self.duration > settings.CHUNKIZE_VIDEO_DURATION and chunkize: + + for profile in profiles: + + if profile.extension == "gif": + profiles.remove(profile) + encoding = Encoding(media=self, profile=profile) + encoding.save() + enc_url = settings.SSL_FRONTEND_HOST + encoding.get_absolute_url() + tasks.encode_media.apply_async( + args=[self.friendly_token, profile.id, encoding.id, enc_url], + kwargs={"force": force}, + priority=0, + ) + profiles = [p.id for p in profiles] + tasks.chunkize_media.delay(self.friendly_token, profiles, force=force) + else: + for profile in profiles: + if profile.extension != "gif": + if self.video_height and self.video_height < profile.resolution: + if ( + profile.resolution + not in settings.MINIMUM_RESOLUTIONS_TO_ENCODE + ): + continue + encoding = Encoding(media=self, profile=profile) + encoding.save() + enc_url = settings.SSL_FRONTEND_HOST + encoding.get_absolute_url() + if profile.resolution in settings.MINIMUM_RESOLUTIONS_TO_ENCODE: + priority = 9 + else: + priority = 0 + tasks.encode_media.apply_async( + args=[self.friendly_token, profile.id, encoding.id, enc_url], + kwargs={"force": force}, + priority=priority, + ) + + return True + + def post_encode_actions(self, encoding=None, action=None): + """perform things after encode has run + whether it has failed or succeeded + """ + + self.set_encoding_status() + + # set a preview url + if encoding: + if self.media_type == "video" and encoding.profile.extension == "gif": + if action == "delete": + self.preview_file_path = "" + else: + self.preview_file_path = encoding.media_file.path + self.save(update_fields=["listable", "preview_file_path"]) + + self.save(update_fields=["encoding_status", "listable"]) + + if ( + encoding + and encoding.status == "success" + and encoding.profile.codec == "h264" + and action == "add" + ): + from . import tasks + + tasks.create_hls(self.friendly_token) + + return True + + def set_encoding_status(self): + """Set encoding_status for videos + Set success if at least one mp4 exists + """ + mp4_statuses = set( + encoding.status + for encoding in self.encodings.filter(profile__extension="mp4", chunk=False) + ) + + if not mp4_statuses: + encoding_status = "pending" + elif "success" in mp4_statuses: + encoding_status = "success" + elif "running" in mp4_statuses: + encoding_status = "running" + else: + encoding_status = "fail" + self.encoding_status = encoding_status + + return True + + @property + def encodings_info(self, full=False): + """Property used on serializers""" + + ret = {} + chunks_ret = {} + + if self.media_type not in ["video"]: + return ret + for key in ENCODE_RESOLUTIONS_KEYS: + ret[key] = {} + for encoding in self.encodings.select_related("profile").filter(chunk=False): + if encoding.profile.extension == "gif": + continue + enc = self.get_encoding_info(encoding, full=full) + resolution = encoding.profile.resolution + ret[resolution][encoding.profile.codec] = enc + + # TODO: the following code is untested/needs optimization + + # if a file is broken in chunks and they are being + # encoded, the final encoding file won't appear until + # they are finished. 
Thus, produce the info for these + if full: + extra = [] + for encoding in self.encodings.select_related("profile").filter(chunk=True): + resolution = encoding.profile.resolution + if not ret[resolution].get(encoding.profile.codec): + extra.append(encoding.profile.codec) + for codec in extra: + ret[resolution][codec] = {} + v = self.encodings.filter(chunk=True, profile__codec=codec).values( + "progress" + ) + ret[resolution][codec]["progress"] = ( + sum([p["progress"] for p in v]) / v.count() + ) + # TODO; status/logs/errors + return ret + + def get_encoding_info(self, encoding, full=False): + """Property used on serializers""" + + ep = {} + ep["title"] = encoding.profile.name + ep["url"] = encoding.media_encoding_url + ep["progress"] = encoding.progress + ep["size"] = encoding.size + ep["encoding_id"] = encoding.id + ep["status"] = encoding.status + + if full: + ep["logs"] = encoding.logs + ep["worker"] = encoding.worker + ep["retries"] = encoding.retries + if encoding.total_run_time: + ep["total_run_time"] = encoding.total_run_time + if encoding.commands: + ep["commands"] = encoding.commands + ep["time_started"] = encoding.add_date + ep["updated_time"] = encoding.update_date + return ep + + @property + def categories_info(self): + """Property used on serializers""" + + ret = [] + for cat in self.category.all(): + ret.append({"title": cat.title, "url": cat.get_absolute_url()}) + return ret + + @property + def tags_info(self): + """Property used on serializers""" + + ret = [] + for tag in self.tags.all(): + ret.append({"title": tag.title, "url": tag.get_absolute_url()}) + return ret + + @property + def original_media_url(self): + """Property used on serializers""" + + if settings.SHOW_ORIGINAL_MEDIA: + return helpers.url_from_path(self.media_file.path) + else: + return None + + @property + def thumbnail_url(self): + """Property used on serializers + Prioritize uploaded_thumbnail, if exists, then thumbnail + that is auto-generated + """ + + if self.uploaded_thumbnail: + return helpers.url_from_path(self.uploaded_thumbnail.path) + if self.thumbnail: + return helpers.url_from_path(self.thumbnail.path) + return None + + @property + def poster_url(self): + """Property used on serializers + Prioritize uploaded_poster, if exists, then poster + that is auto-generated + """ + + if self.uploaded_poster: + return helpers.url_from_path(self.uploaded_poster.path) + if self.poster: + return helpers.url_from_path(self.poster.path) + return None + + @property + def subtitles_info(self): + """Property used on serializers + Returns subtitles info + """ + + ret = [] + for subtitle in self.subtitles.all(): + ret.append( + { + "src": helpers.url_from_path(subtitle.subtitle_file.path), + "srclang": subtitle.language.code, + "label": subtitle.language.title, + } + ) + return ret + + @property + def sprites_url(self): + """Property used on serializers + Returns sprites url + """ + + if self.sprites: + return helpers.url_from_path(self.sprites.path) + return None + + @property + def preview_url(self): + """Property used on serializers + Returns preview url + """ + + if self.preview_file_path: + return helpers.url_from_path(self.preview_file_path) + + # get preview_file out of the encodings, since some times preview_file_path + # is empty but there is the gif encoding! 
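# --- Editor's annotation (illustration only, not part of this commit) ---
# For reference, the encodings_info / get_encoding_info pair defined above
# produces a nested dict keyed by resolution and then codec; the values below
# are made up, only the shape follows the code:
#
#   {
#       720: {"h264": {"title": "...", "url": "...", "progress": 100,
#                      "size": "12MB", "encoding_id": 5, "status": "success"}},
#       480: {"h264": {...}},
#       240: {}, 360: {}, 1080: {}, 1440: {}, 2160: {},
#   }
#
# With full=True each inner dict also carries logs/worker/retries and, for
# chunked encodes still in progress, an averaged "progress" value.
# --- end annotation ---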
+ preview_media = self.encodings.filter(profile__extension="gif").first() + if preview_media and preview_media.media_file: + return helpers.url_from_path(preview_media.media_file.path) + return None + + @property + def hls_info(self): + """Property used on serializers + Returns hls info, curated to be read by video.js + """ + + res = {} + if self.hls_file: + if os.path.exists(self.hls_file): + hls_file = self.hls_file + p = os.path.dirname(hls_file) + m3u8_obj = m3u8.load(hls_file) + if os.path.exists(hls_file): + res["master_file"] = helpers.url_from_path(hls_file) + for iframe_playlist in m3u8_obj.iframe_playlists: + uri = os.path.join(p, iframe_playlist.uri) + if os.path.exists(uri): + resolution = iframe_playlist.iframe_stream_info.resolution[ + 1 + ] + res["{}_iframe".format(resolution)] = helpers.url_from_path( + uri + ) + for playlist in m3u8_obj.playlists: + uri = os.path.join(p, playlist.uri) + if os.path.exists(uri): + resolution = playlist.stream_info.resolution[1] + res[ + "{}_playlist".format(resolution) + ] = helpers.url_from_path(uri) + return res + + @property + def author_name(self): + return self.user.name + + @property + def author_username(self): + return self.user.username + + def author_profile(self): + return self.user.get_absolute_url() + + def author_thumbnail(self): + return helpers.url_from_path(self.user.logo.path) + + def get_absolute_url(self, api=False, edit=False): + if edit: + return reverse("edit_media") + "?m={0}".format(self.friendly_token) + if api: + return reverse( + "api_get_media", kwargs={"friendly_token": self.friendly_token} + ) + else: + return reverse("get_media") + "?m={0}".format(self.friendly_token) + + @property + def edit_url(self): + return self.get_absolute_url(edit=True) + + @property + def add_subtitle_url(self): + return "/add_subtitle?m=%s" % self.friendly_token + + @property + def ratings_info(self): + """Property used on ratings + If ratings functionality enabled + """ + + # to be used if user ratings are allowed + ret = [] + if not settings.ALLOW_RATINGS: + return [] + for category in self.rating_category.filter(enabled=True): + ret.append( + { + "score": -1, + # default score, means no score. 
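A score of -1 therefore acts as a "not rated yet" sentinel that can be overwritten once the requesting user's own ratings are known; a hypothetical merge helper (not part of the codebase) could look like this:

    def merge_user_scores(ratings_info, user_scores):
        # user_scores maps rating category id -> the score the user gave.
        # Categories the user never rated keep the -1 sentinel.
        for entry in ratings_info:
            entry["score"] = user_scores.get(entry["category_id"], entry["score"])
        return ratings_info

    info = [{"score": -1, "category_id": 3, "category_title": "Acting"}]
    assert merge_user_scores(info, {3: 4})[0]["score"] == 4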
In case user has already + # rated for this media, it will be populated + "category_id": category.id, + "category_title": category.title, + } + ) + return ret + + +class License(models.Model): + """A Base license model to be used in Media""" + + title = models.CharField(max_length=100, unique=True) + description = models.TextField(blank=True) + + def __str__(self): + return self.title + + +class Category(models.Model): + """A Category base model""" + + uid = models.UUIDField(unique=True, default=uuid.uuid4) + + add_date = models.DateTimeField(auto_now_add=True) + + title = models.CharField(max_length=100, unique=True, db_index=True) + + description = models.TextField(blank=True) + + user = models.ForeignKey( + "users.User", on_delete=models.CASCADE, blank=True, null=True + ) + + is_global = models.BooleanField( + default=False, help_text="global categories or user specific" + ) + + media_count = models.IntegerField(default=0, help_text="number of media") + + thumbnail = ProcessedImageField( + upload_to=category_thumb_path, + processors=[ResizeToFit(width=344, height=None)], + format="JPEG", + options={"quality": 85}, + blank=True, + ) + + listings_thumbnail = models.CharField( + max_length=400, blank=True, null=True, help_text="Thumbnail to show on listings" + ) + + def __str__(self): + return self.title + + class Meta: + ordering = ["title"] + verbose_name_plural = "Categories" + + def get_absolute_url(self): + return reverse("search") + "?c={0}".format(self.title) + + def update_category_media(self): + """Set media_count""" + + self.media_count = Media.objects.filter(listable=True, category=self).count() + self.save(update_fields=["media_count"]) + return True + + @property + def thumbnail_url(self): + """Return thumbnail for category + prioritize processed value of listings_thumbnail + then thumbnail + """ + + if self.listings_thumbnail: + return self.listings_thumbnail + if self.thumbnail: + return helpers.url_from_path(self.thumbnail.path) + + media = ( + Media.objects.filter(category=self, state="public") + .order_by("-views") + .first() + ) + if media: + return media.thumbnail_url + + return None + + def save(self, *args, **kwargs): + strip_text_items = ["title", "description"] + for item in strip_text_items: + setattr(self, item, strip_tags(getattr(self, item, None))) + super(Category, self).save(*args, **kwargs) + + +class Tag(models.Model): + """A Tag model""" + + title = models.CharField(max_length=100, unique=True, db_index=True) + + user = models.ForeignKey( + "users.User", on_delete=models.CASCADE, blank=True, null=True + ) + + media_count = models.IntegerField(default=0, help_text="number of media") + + listings_thumbnail = models.CharField( + max_length=400, + blank=True, + null=True, + help_text="Thumbnail to show on listings", + db_index=True, + ) + + def __str__(self): + return self.title + + class Meta: + ordering = ["title"] + + def get_absolute_url(self): + return reverse("search") + "?t={0}".format(self.title) + + def update_tag_media(self): + self.media_count = Media.objects.filter( + state="public", is_reviewed=True, tags=self + ).count() + self.save(update_fields=["media_count"]) + return True + + def save(self, *args, **kwargs): + self.title = slugify(self.title[:99]) + strip_text_items = ["title"] + for item in strip_text_items: + setattr(self, item, strip_tags(getattr(self, item, None))) + super(Tag, self).save(*args, **kwargs) + + @property + def thumbnail_url(self): + if self.listings_thumbnail: + return self.listings_thumbnail + media = ( + 
Media.objects.filter(tags=self, state="public").order_by("-views").first() + ) + if media: + return media.thumbnail_url + + return None + + +class EncodeProfile(models.Model): + """Encode Profile model + keeps information for each profile + """ + + name = models.CharField(max_length=90) + + extension = models.CharField(max_length=10, choices=ENCODE_EXTENSIONS) + + resolution = models.IntegerField(choices=ENCODE_RESOLUTIONS, blank=True, null=True) + + codec = models.CharField(max_length=10, choices=CODECS, blank=True, null=True) + + description = models.TextField(blank=True, help_text="description") + + active = models.BooleanField(default=True) + + def __str__(self): + return self.name + + class Meta: + ordering = ["resolution"] + + +class Encoding(models.Model): + """Encoding Media Instances""" + + add_date = models.DateTimeField(auto_now_add=True) + + commands = models.TextField(blank=True, help_text="commands run") + + chunk = models.BooleanField(default=False, db_index=True, help_text="is chunk?") + + chunk_file_path = models.CharField(max_length=400, blank=True) + + chunks_info = models.TextField(blank=True) + + logs = models.TextField(blank=True) + + md5sum = models.CharField(max_length=50, blank=True, null=True) + + media = models.ForeignKey(Media, on_delete=models.CASCADE, related_name="encodings") + + media_file = models.FileField( + "encoding file", upload_to=encoding_media_file_path, blank=True, max_length=500 + ) + + profile = models.ForeignKey(EncodeProfile, on_delete=models.CASCADE) + + progress = models.PositiveSmallIntegerField(default=0) + + update_date = models.DateTimeField(auto_now=True) + + retries = models.IntegerField(default=0) + + size = models.CharField(max_length=20, blank=True) + + status = models.CharField( + max_length=20, choices=MEDIA_ENCODING_STATUS, default="pending" + ) + + temp_file = models.CharField(max_length=400, blank=True) + + task_id = models.CharField(max_length=100, blank=True) + + total_run_time = models.IntegerField(default=0) + + worker = models.CharField(max_length=100, blank=True) + + @property + def media_encoding_url(self): + if self.media_file: + return helpers.url_from_path(self.media_file.path) + return None + + @property + def media_chunk_url(self): + if self.chunk_file_path: + return helpers.url_from_path(self.chunk_file_path) + return None + + def save(self, *args, **kwargs): + if self.media_file: + cmd = ["stat", "-c", "%s", self.media_file.path] + stdout = helpers.run_command(cmd).get("out") + if stdout: + size = int(stdout.strip()) + self.size = helpers.show_file_size(size) + if self.chunk_file_path and not self.md5sum: + cmd = ["md5sum", self.chunk_file_path] + stdout = helpers.run_command(cmd).get("out") + if stdout: + md5sum = stdout.strip().split()[0] + self.md5sum = md5sum + + super(Encoding, self).save(*args, **kwargs) + + def set_progress(self, progress, commit=True): + if isinstance(progress, int): + if 0 <= progress <= 100: + self.progress = progress + self.save(update_fields=["progress"]) + return True + return False + + def __str__(self): + return "{0}-{1}".format(self.profile.name, self.media.title) + + def get_absolute_url(self): + return reverse("api_get_encoding", kwargs={"encoding_id": self.id}) + + +class Language(models.Model): + """Language model + to be used with Subtitles + """ + + code = models.CharField(max_length=12, help_text="language code") + + title = models.CharField(max_length=100, help_text="language code") + + class Meta: + ordering = ["id"] + + def __str__(self): + return 
"{0}-{1}".format(self.code, self.title) + + +class Subtitle(models.Model): + """Subtitles model""" + + language = models.ForeignKey(Language, on_delete=models.CASCADE) + + media = models.ForeignKey(Media, on_delete=models.CASCADE, related_name="subtitles") + + subtitle_file = models.FileField( + "Subtitle/CC file", + help_text="File has to be WebVTT format", + upload_to=subtitles_file_path, + max_length=500, + ) + + user = models.ForeignKey("users.User", on_delete=models.CASCADE) + + def __str__(self): + return "{0}-{1}".format(self.media.title, self.language.title) + + +class RatingCategory(models.Model): + """Rating Category + Facilitate user ratings. + One or more rating categories per Category can exist + will be shown to the media if they are enabled + """ + + description = models.TextField(blank=True) + + enabled = models.BooleanField(default=True) + + title = models.CharField(max_length=200, unique=True, db_index=True) + + class Meta: + verbose_name_plural = "Rating Categories" + + def __str__(self): + return "{0}".format(self.title) + + +def validate_rating(value): + if -1 >= value or value > 5: + raise ValidationError("score has to be between 0 and 5") + + +class Rating(models.Model): + """User Rating""" + + add_date = models.DateTimeField(auto_now_add=True) + + media = models.ForeignKey(Media, on_delete=models.CASCADE, related_name="ratings") + + rating_category = models.ForeignKey(RatingCategory, on_delete=models.CASCADE) + + score = models.IntegerField(validators=[validate_rating]) + + user = models.ForeignKey("users.User", on_delete=models.CASCADE) + + class Meta: + verbose_name_plural = "Ratings" + indexes = [ + models.Index(fields=["user", "media"]), + ] + unique_together = ("user", "media", "rating_category") + + def __str__(self): + return "{0}, rate for {1} for category {2}".format( + self.user.username, self.media.title, self.rating_category.title + ) + + +class Playlist(models.Model): + """Playlists model""" + + add_date = models.DateTimeField(auto_now_add=True, db_index=True) + + description = models.TextField(blank=True, help_text="description") + + friendly_token = models.CharField(blank=True, max_length=12, db_index=True) + + media = models.ManyToManyField(Media, through="playlistmedia", blank=True) + + title = models.CharField(max_length=100, db_index=True) + + uid = models.UUIDField(unique=True, default=uuid.uuid4) + + user = models.ForeignKey( + "users.User", on_delete=models.CASCADE, db_index=True, related_name="playlists" + ) + + def __str__(self): + return self.title + + @property + def media_count(self): + return self.media.count() + + def get_absolute_url(self, api=False): + if api: + return reverse( + "api_get_playlist", kwargs={"friendly_token": self.friendly_token} + ) + else: + return reverse( + "get_playlist", kwargs={"friendly_token": self.friendly_token} + ) + + @property + def url(self): + return self.get_absolute_url() + + @property + def api_url(self): + return self.get_absolute_url(api=True) + + def user_thumbnail_url(self): + if self.user.logo: + return helpers.url_from_path(self.user.logo.path) + return None + + def set_ordering(self, media, ordering): + if media not in self.media.all(): + return False + pm = PlaylistMedia.objects.filter(playlist=self, media=media).first() + if pm and isinstance(ordering, int) and 0 < ordering: + pm.ordering = ordering + pm.save() + return True + return False + + def save(self, *args, **kwargs): + strip_text_items = ["title", "description"] + for item in strip_text_items: + setattr(self, item, 
strip_tags(getattr(self, item, None))) + self.title = self.title[:99] + + if not self.friendly_token: + while True: + friendly_token = helpers.produce_friendly_token() + if not Playlist.objects.filter(friendly_token=friendly_token): + self.friendly_token = friendly_token + break + super(Playlist, self).save(*args, **kwargs) + + @property + def thumbnail_url(self): + pm = self.playlistmedia_set.first() + if pm: + return helpers.url_from_path(pm.media.thumbnail.path) + return None + + +class PlaylistMedia(models.Model): + """Helper model to store playlist specific media""" + + action_date = models.DateTimeField(auto_now=True) + + media = models.ForeignKey(Media, on_delete=models.CASCADE) + + playlist = models.ForeignKey(Playlist, on_delete=models.CASCADE) + + ordering = models.IntegerField(default=1) + + class Meta: + ordering = ["ordering", "-action_date"] + + +class Comment(MPTTModel): + """Comments model""" + + add_date = models.DateTimeField(auto_now_add=True) + + media = models.ForeignKey( + Media, on_delete=models.CASCADE, db_index=True, related_name="comments" + ) + + parent = TreeForeignKey( + "self", on_delete=models.CASCADE, null=True, blank=True, related_name="children" + ) + + text = models.TextField(help_text="text") + + uid = models.UUIDField(unique=True, default=uuid.uuid4) + + user = models.ForeignKey("users.User", on_delete=models.CASCADE, db_index=True) + + class MPTTMeta: + order_insertion_by = ["add_date"] + + def __str__(self): + return "On {0} by {1}".format(self.media.title, self.user.username) + + def save(self, *args, **kwargs): + strip_text_items = ["text"] + for item in strip_text_items: + setattr(self, item, strip_tags(getattr(self, item, None))) + + if self.text: + self.text = self.text[: settings.MAX_CHARS_FOR_COMMENT] + + super(Comment, self).save(*args, **kwargs) + + def get_absolute_url(self): + return reverse("get_media") + "?m={0}".format(self.media.friendly_token) + + @property + def media_url(self): + return self.get_absolute_url() + + +@receiver(post_save, sender=Media) +def media_save(sender, instance, created, **kwargs): + # media_file path is not set correctly until mode is saved + # post_save signal will take care of calling a few functions + # once model is saved + # SOS: do not put anything here, as if more logic is added, + # we have to disconnect signal to avoid infinite recursion + if created: + instance.media_init() + notify_users(friendly_token=instance.friendly_token, action="media_added") + + instance.user.update_user_media() + if instance.category.all(): + # this won't catch when a category + # is removed from a media, which is what we want... + for category in instance.category.all(): + category.update_category_media() + + if instance.tags.all(): + for tag in instance.tags.all(): + tag.update_tag_media() + + instance.update_search_vector() + + +@receiver(pre_delete, sender=Media) +def media_file_pre_delete(sender, instance, **kwargs): + if instance.category.all(): + for category in instance.category.all(): + instance.category.remove(category) + category.update_category_media() + if instance.tags.all(): + for tag in instance.tags.all(): + instance.tags.remove(tag) + tag.update_tag_media() + + +@receiver(post_delete, sender=Media) +def media_file_delete(sender, instance, **kwargs): + """ + Deletes file from filesystem + when corresponding `Media` object is deleted. 
+ """ + + if instance.media_file: + helpers.rm_file(instance.media_file.path) + if instance.thumbnail: + helpers.rm_file(instance.thumbnail.path) + if instance.poster: + helpers.rm_file(instance.poster.path) + if instance.uploaded_thumbnail: + helpers.rm_file(instance.uploaded_thumbnail.path) + if instance.uploaded_poster: + helpers.rm_file(instance.uploaded_poster.path) + if instance.sprites: + helpers.rm_file(instance.sprites.path) + if instance.hls_file: + p = os.path.dirname(instance.hls_file) + helpers.rm_dir(p) + instance.user.update_user_media() + + +@receiver(m2m_changed, sender=Media.category.through) +def media_m2m(sender, instance, **kwargs): + if instance.category.all(): + for category in instance.category.all(): + category.update_category_media() + if instance.tags.all(): + for tag in instance.tags.all(): + tag.update_tag_media() + + +@receiver(post_save, sender=Encoding) +def encoding_file_save(sender, instance, created, **kwargs): + """Performs actions on encoding file delete + For example, if encoding is a chunk file, with encoding_status success, + perform a check if this is the final chunk file of a media, then + concatenate chunks, create final encoding file and delete chunks + """ + + if instance.chunk and instance.status == "success": + # a chunk got completed + + # check if all chunks are OK + # then concatenate to new Encoding - and remove chunks + # this should run only once! + if instance.media_file: + try: + orig_chunks = json.loads(instance.chunks_info).keys() + except BaseException: + instance.delete() + return False + + chunks = Encoding.objects.filter( + media=instance.media, + profile=instance.profile, + chunks_info=instance.chunks_info, + chunk=True, + ).order_by("add_date") + + complete = True + + # perform validation, make sure everything is there + for chunk in orig_chunks: + if not chunks.filter(chunk_file_path=chunk): + complete = False + break + + for chunk in chunks: + if not (chunk.media_file and chunk.media_file.path): + complete = False + break + + if complete: + # concatenate chunks and create final encoding file + chunks_paths = [f.media_file.path for f in chunks] + + with tempfile.TemporaryDirectory( + dir=settings.TEMP_DIRECTORY + ) as temp_dir: + seg_file = helpers.create_temp_file(suffix=".txt", dir=temp_dir) + tf = helpers.create_temp_file( + suffix=".{0}".format(instance.profile.extension), dir=temp_dir + ) + with open(seg_file, "w") as ff: + for f in chunks_paths: + ff.write("file {}\n".format(f)) + cmd = [ + settings.FFMPEG_COMMAND, + "-y", + "-f", + "concat", + "-safe", + "0", + "-i", + seg_file, + "-c", + "copy", + "-pix_fmt", + "yuv420p", + "-movflags", + "faststart", + tf, + ] + stdout = helpers.run_command(cmd) + + encoding = Encoding( + media=instance.media, + profile=instance.profile, + status="success", + progress=100, + ) + all_logs = "\n".join([st.logs for st in chunks]) + encoding.logs = "{0}\n{1}\n{2}".format( + chunks_paths, stdout, all_logs + ) + workers = list(set([st.worker for st in chunks])) + encoding.worker = json.dumps({"workers": workers}) + + start_date = min([st.add_date for st in chunks]) + end_date = max([st.update_date for st in chunks]) + encoding.total_run_time = (end_date - start_date).seconds + encoding.save() + + with open(tf, "rb") as f: + myfile = File(f) + output_name = "{0}.{1}".format( + helpers.get_file_name(instance.media.media_file.path), + instance.profile.extension, + ) + encoding.media_file.save(content=myfile, name=output_name) + + # encoding is saved, deleting chunks + # and any other encoding 
that might exist + # first perform one last validation + # to avoid that this is run twice + if ( + len(orig_chunks) + == Encoding.objects.filter( + media=instance.media, + profile=instance.profile, + chunks_info=instance.chunks_info, + ).count() + ): + # if two chunks are finished at the same time, this + # will be changed + who = Encoding.objects.filter( + media=encoding.media, profile=encoding.profile + ).exclude(id=encoding.id) + who.delete() + else: + encoding.delete() + if not Encoding.objects.filter(chunks_info=instance.chunks_info): + # TODO: in case of remote workers, files should be deleted + # example + # for worker in workers: + # for chunk in json.loads(instance.chunks_info).keys(): + # remove_media_file.delay(media_file=chunk) + for chunk in json.loads(instance.chunks_info).keys(): + helpers.rm_file(chunk) + instance.media.post_encode_actions(encoding=instance, action="add") + + elif instance.chunk and instance.status == "fail": + encoding = Encoding( + media=instance.media, profile=instance.profile, status="fail", progress=100 + ) + + chunks = Encoding.objects.filter( + media=instance.media, chunks_info=instance.chunks_info, chunk=True + ).order_by("add_date") + + chunks_paths = [f.media_file.path for f in chunks] + + all_logs = "\n".join([st.logs for st in chunks]) + encoding.logs = "{0}\n{1}\n{2}".format(chunks_paths, all_logs) + workers = list(set([st.worker for st in chunks])) + encoding.worker = json.dumps({"workers": workers}) + start_date = min([st.add_date for st in chunks]) + end_date = max([st.update_date for st in chunks]) + encoding.total_run_time = (end_date - start_date).seconds + encoding.save() + + who = Encoding.objects.filter( + media=encoding.media, profile=encoding.profile + ).exclude(id=encoding.id) + + who.delete() + pass # TODO: merge with above if, do not repeat code + else: + if instance.status in ["fail", "success"]: + instance.media.post_encode_actions(encoding=instance, action="add") + + encodings = set( + [ + encoding.status + for encoding in Encoding.objects.filter(media=instance.media) + ] + ) + if ("running" in encodings) or ("pending" in encodings): + return + workers = list( + set( + [ + encoding.worker + for encoding in Encoding.objects.filter(media=instance.media) + ] + ) + ) + + +@receiver(post_delete, sender=Encoding) +def encoding_file_delete(sender, instance, **kwargs): + """ + Deletes file from filesystem + when corresponding `Encoding` object is deleted. + """ + + if instance.media_file: + helpers.rm_file(instance.media_file.path) + if not instance.chunk: + instance.media.post_encode_actions(encoding=instance, action="delete") + # delete local chunks, and remote chunks + media file. 
Only when the + # last encoding of a media is complete diff --git a/files/permissions.py b/files/permissions.py new file mode 100644 index 0000000..5601166 --- /dev/null +++ b/files/permissions.py @@ -0,0 +1,9 @@ +from rest_framework import permissions +from .methods import is_mediacms_editor + + +class IsMediacmsEditor(permissions.BasePermission): + def has_permission(self, request, view): + if is_mediacms_editor(request.user): + return True + return False diff --git a/files/serializers.py b/files/serializers.py new file mode 100644 index 0000000..75afc14 --- /dev/null +++ b/files/serializers.py @@ -0,0 +1,257 @@ +from rest_framework import serializers + +from .models import Media, EncodeProfile, Playlist, Comment, Category, Tag + +# TODO: put them in a more DRY way + + +class MediaSerializer(serializers.ModelSerializer): + # to be used in APIs as show related media + user = serializers.ReadOnlyField(source="user.username") + url = serializers.SerializerMethodField() + api_url = serializers.SerializerMethodField() + thumbnail_url = serializers.SerializerMethodField() + author_profile = serializers.SerializerMethodField() + author_thumbnail = serializers.SerializerMethodField() + + def get_url(self, obj): + return self.context["request"].build_absolute_uri(obj.get_absolute_url()) + + def get_api_url(self, obj): + return self.context["request"].build_absolute_uri( + obj.get_absolute_url(api=True) + ) + + def get_thumbnail_url(self, obj): + return self.context["request"].build_absolute_uri(obj.thumbnail_url) + + def get_author_profile(self, obj): + return self.context["request"].build_absolute_uri(obj.author_profile()) + + def get_author_thumbnail(self, obj): + return self.context["request"].build_absolute_uri(obj.author_thumbnail()) + + class Meta: + model = Media + read_only_fields = ( + "friendly_token", + "user", + "add_date", + "views", + "media_type", + "state", + "duration", + "encoding_status", + "views", + "likes", + "dislikes", + "reported_times", + "size", + "is_reviewed", + ) + fields = ( + "friendly_token", + "url", + "api_url", + "user", + "title", + "description", + "add_date", + "views", + "media_type", + "state", + "duration", + "thumbnail_url", + "is_reviewed", + "url", + "api_url", + "preview_url", + "author_name", + "author_profile", + "author_thumbnail", + "encoding_status", + "views", + "likes", + "dislikes", + "reported_times", + "featured", + "user_featured", + "size", + ) + + +class SingleMediaSerializer(serializers.ModelSerializer): + user = serializers.ReadOnlyField(source="user.username") + url = serializers.SerializerMethodField() + + def get_url(self, obj): + return self.context["request"].build_absolute_uri(obj.get_absolute_url()) + + class Meta: + model = Media + read_only_fields = ( + "friendly_token", + "user", + "add_date", + "views", + "media_type", + "state", + "duration", + "encoding_status", + "views", + "likes", + "dislikes", + "reported_times", + "size", + "video_height", + "is_reviewed", + ) + fields = ( + "url", + "user", + "title", + "description", + "add_date", + "edit_date", + "media_type", + "state", + "duration", + "thumbnail_url", + "poster_url", + "thumbnail_time", + "url", + "sprites_url", + "preview_url", + "author_name", + "author_profile", + "author_thumbnail", + "encodings_info", + "encoding_status", + "views", + "likes", + "dislikes", + "reported_times", + "user_featured", + "original_media_url", + "size", + "video_height", + "enable_comments", + "categories_info", + "is_reviewed", + "edit_url", + "tags_info", + "hls_info", + "license", 
+ "subtitles_info", + "ratings_info", + "add_subtitle_url", + "allow_download", + ) + + +class MediaSearchSerializer(serializers.ModelSerializer): + url = serializers.SerializerMethodField() + + def get_url(self, obj): + return self.context["request"].build_absolute_uri(obj.get_absolute_url()) + + class Meta: + model = Media + fields = ( + "title", + "author_name", + "author_profile", + "thumbnail_url", + "add_date", + "views", + "description", + "friendly_token", + "duration", + "url", + "media_type", + "preview_url", + "categories_info", + ) + + +class EncodeProfileSerializer(serializers.ModelSerializer): + class Meta: + model = EncodeProfile + fields = ("name", "extension", "resolution", "codec", "description") + + +class CategorySerializer(serializers.ModelSerializer): + user = serializers.ReadOnlyField(source="user.username") + + class Meta: + model = Category + fields = ( + "title", + "description", + "is_global", + "media_count", + "user", + "thumbnail_url", + ) + + +class TagSerializer(serializers.ModelSerializer): + class Meta: + model = Tag + fields = ("title", "media_count", "thumbnail_url") + + +class PlaylistSerializer(serializers.ModelSerializer): + user = serializers.ReadOnlyField(source="user.username") + + class Meta: + model = Playlist + read_only_fields = ("add_date", "user") + fields = ( + "add_date", + "title", + "description", + "user", + "media_count", + "url", + "api_url", + "thumbnail_url" + ) + + +class PlaylistDetailSerializer(serializers.ModelSerializer): + user = serializers.ReadOnlyField(source="user.username") + + class Meta: + model = Playlist + read_only_fields = ("add_date", "user") + fields = ( + "title", + "add_date", + "user_thumbnail_url", + "description", + "user", + "media_count", + "url", + "thumbnail_url" + ) + + +class CommentSerializer(serializers.ModelSerializer): + author_profile = serializers.ReadOnlyField(source="user.get_absolute_url") + author_name = serializers.ReadOnlyField(source="user.name") + author_thumbnail_url = serializers.ReadOnlyField(source="user.thumbnail_url") + + class Meta: + model = Comment + read_only_fields = ("add_date", "uid") + fields = ( + "add_date", + "text", + "parent", + "author_thumbnail_url", + "author_profile", + "author_name", + "media_url", + "uid", + ) diff --git a/files/stop_words.py b/files/stop_words.py new file mode 100644 index 0000000..021685f --- /dev/null +++ b/files/stop_words.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- + + +STOP_WORDS = set( + """ +a about above across after afterwards again against all almost alone along +already also although always am among amongst amount an and another any anyhow +anyone anything anyway anywhere are around as at +back be became because become becomes becoming been before beforehand behind +being below beside besides between beyond both bottom but by +call can cannot ca could +did do does doing done down due during +each eight either eleven else elsewhere empty enough even ever every +everyone everything everywhere except +few fifteen fifty first five for former formerly forty four from front full +further +get give go +had has have he hence her here hereafter hereby herein hereupon hers herself +him himself his how however hundred +i if in indeed into is it its itself +keep +last latter latterly least less +just +made make many may me meanwhile might mine more moreover most mostly move much +must my myself +name namely neither never nevertheless next nine no nobody none noone nor not +nothing now nowhere +of off often on once one only onto or other others 
otherwise our ours ourselves +out over own +part per perhaps please put +quite +rather re really regarding +same say see seem seemed seeming seems serious several she should show side +since six sixty so some somehow someone something sometime sometimes somewhere +still such +take ten than that the their them themselves then thence there thereafter +thereby therefore therein thereupon these they third this those though three +through throughout thru thus to together too top toward towards twelve twenty +two +under until up unless upon us used using +various very very via was we well were what whatever when whence whenever where +whereafter whereas whereby wherein whereupon wherever whether which while +whither who whoever whole whom whose why will with within without would +yet you your yours yourself yourselves +""".split() +) + +SPANISH_STOP_WORDS = set( + """ +a actualmente acuerdo adelante ademas además adrede afirmó agregó ahi ahora ahí al algo alguna algunas alguno algunos algún alli allí alrededor ambos ampleamos antano antaño ante anterior antes apenas aproximadamente aquel aquella aquellas aquello aquellos aqui aquél aquélla aquéllas aquéllos aquí arriba arribaabajo aseguró asi así atras aun aunque ayer añadió aún +b bajo bastante bien breve buen buena buenas bueno buenos +c cada casi cerca cierta ciertas cierto ciertos cinco claro comentó como con conmigo conocer conseguimos conseguir considera consideró consigo consigue consiguen consigues contigo contra cosas creo cual cuales cualquier cuando cuanta cuantas cuanto cuantos cuatro cuenta cuál cuáles cuándo cuánta cuántas cuánto cuántos cómo +d da dado dan dar de debajo debe deben debido decir dejó del delante demasiado demás dentro deprisa desde despacio despues después detras detrás dia dias dice dicen dicho dieron diferente diferentes dijeron dijo dio donde dos durante día días dónde +e ejemplo el ella ellas ello ellos embargo empleais emplean emplear empleas empleo en encima encuentra enfrente enseguida entonces entre era erais eramos eran eras eres es esa esas ese eso esos esta estaba estabais estaban estabas estad estada estadas estado estados estais estamos estan estando estar estaremos estará estarán estarás estaré estaréis estaría estaríais estaríamos estarían estarías estas este estemos esto estos estoy estuve estuviera estuvierais estuvieran estuvieras estuvieron estuviese estuvieseis estuviesen estuvieses estuvimos estuviste estuvisteis estuviéramos estuviésemos estuvo está estábamos estáis están estás esté estéis estén estés ex excepto existe existen explicó expresó +f fin final fue fuera fuerais fueran fueras fueron fuese fueseis fuesen fueses fui fuimos fuiste fuisteis fuéramos fuésemos +g general gran grandes gueno +h ha haber habia habida habidas habido habidos habiendo habla hablan habremos habrá habrán habrás habré habréis habría habríais habríamos habrían habrías habéis había habíais habíamos habían habías hace haceis hacemos hacen hacer hacerlo haces hacia haciendo hago han has hasta hay haya hayamos hayan hayas hayáis he hecho hemos hicieron hizo horas hoy hube hubiera hubierais hubieran hubieras hubieron hubiese hubieseis hubiesen hubieses hubimos hubiste hubisteis hubiéramos hubiésemos hubo +i igual incluso indicó informo informó intenta intentais intentamos intentan intentar intentas intento ir +j junto +k +l la lado largo las le lejos les llegó lleva llevar lo los luego lugar +m mal manera manifestó mas mayor me mediante medio mejor mencionó menos menudo mi mia mias mientras mio mios mis misma mismas 
mismo mismos modo momento mucha muchas mucho muchos muy más mí mía mías mío míos +n nada nadie ni ninguna ningunas ninguno ningunos ningún no nos nosotras nosotros nuestra nuestras nuestro nuestros nueva nuevas nuevo nuevos nunca +o ocho os otra otras otro otros +p pais para parece parte partir pasada pasado paìs peor pero pesar poca pocas poco pocos podeis podemos poder podria podriais podriamos podrian podrias podrá podrán podría podrían poner por por qué porque posible primer primera primero primeros principalmente pronto propia propias propio propios proximo próximo próximos pudo pueda puede pueden puedo pues +q qeu que quedó queremos quien quienes quiere quiza quizas quizá quizás quién quiénes qué +r raras realizado realizar realizó repente respecto +s sabe sabeis sabemos saben saber sabes sal salvo se sea seamos sean seas segun segunda segundo según seis ser sera seremos será serán serás seré seréis sería seríais seríamos serían serías seáis señaló si sido siempre siendo siete sigue siguiente sin sino sobre sois sola solamente solas solo solos somos son soy soyos su supuesto sus suya suyas suyo suyos sé sí sólo +t tal tambien también tampoco tan tanto tarde te temprano tendremos tendrá tendrán tendrás tendré tendréis tendría tendríais tendríamos tendrían tendrías tened teneis tenemos tener tenga tengamos tengan tengas tengo tengáis tenida tenidas tenido tenidos teniendo tenéis tenía teníais teníamos tenían tenías tercera ti tiempo tiene tienen tienes toda todas todavia todavía todo todos total trabaja trabajais trabajamos trabajan trabajar trabajas trabajo tras trata través tres tu tus tuve tuviera tuvierais tuvieran tuvieras tuvieron tuviese tuvieseis tuviesen tuvieses tuvimos tuviste tuvisteis tuviéramos tuviésemos tuvo tuya tuyas tuyo tuyos tú +u ultimo un una unas uno unos usa usais usamos usan usar usas uso usted ustedes +v va vais valor vamos van varias varios vaya veces ver verdad verdadera verdadero vez vosotras vosotros voy vuestra vuestras vuestro vuestros +w +x +y ya yo +z +él éramos ésa ésas ése ésos ésta éstas éste éstos última últimas último últimos +""".split() +) + +STOP_WORDS.update(SPANISH_STOP_WORDS) +contractions = ["n't", "'d", "'ll", "'m", "'re", "'s", "'ve"] +STOP_WORDS.update(contractions) + +for apostrophe in ["‘", "’"]: + for stopword in contractions: + STOP_WORDS.add(stopword.replace("'", apostrophe)) diff --git a/files/tasks.py b/files/tasks.py new file mode 100644 index 0000000..cf7125f --- /dev/null +++ b/files/tasks.py @@ -0,0 +1,851 @@ +import re +import os +import json +import subprocess +from datetime import datetime, timedelta +import tempfile +import shutil +from django.core.cache import cache +from django.conf import settings +from django.core.files import File +from django.db.models import Q + +from celery import Task +from celery.decorators import task +from celery.utils.log import get_task_logger +from celery.exceptions import SoftTimeLimitExceeded + +from celery.task.control import revoke +from celery.signals import task_revoked + +from .backends import FFmpegBackend +from .exceptions import VideoEncodingError +from .helpers import ( + calculate_seconds, + rm_file, + create_temp_file, + get_file_name, + get_file_type, + media_file_info, + run_command, + produce_ffmpeg_commands, + produce_friendly_token, +) + +from actions.models import MediaAction, USER_MEDIA_ACTIONS +from users.models import User +from .models import Encoding, EncodeProfile, Media, Category, Rating, Tag +from .methods import list_tasks, pre_save_action, notify_users + +logger 
= get_task_logger(__name__) + +VALID_USER_ACTIONS = [action for action, name in USER_MEDIA_ACTIONS] + +ERRORS_LIST = [ + "Output file is empty, nothing was encoded", + "Invalid data found when processing input", + "Unable to find a suitable output format for", +] + + +@task(name="chunkize_media", bind=True, queue="short_tasks", soft_time_limit=60 * 30) +def chunkize_media(self, friendly_token, profiles, force=True): + """Break media in chunks and start encoding tasks""" + + profiles = [EncodeProfile.objects.get(id=profile) for profile in profiles] + media = Media.objects.get(friendly_token=friendly_token) + cwd = os.path.dirname(os.path.realpath(media.media_file.path)) + file_name = media.media_file.path.split("/")[-1] + random_prefix = produce_friendly_token() + file_format = "{0}_{1}".format(random_prefix, file_name) + chunks_file_name = "%02d_{0}".format(file_format) + chunks_file_name += ".mkv" + cmd = [ + settings.FFMPEG_COMMAND, + "-y", + "-i", + media.media_file.path, + "-c", + "copy", + "-f", + "segment", + "-segment_time", + str(settings.VIDEO_CHUNKS_DURATION), + chunks_file_name, + ] + chunks = [] + ret = run_command(cmd, cwd=cwd) + + if "out" in ret.keys(): + for line in ret.get("error").split("\n"): + ch = re.findall(r"Opening \'([\W\w]+)\' for writing", line) + if ch: + chunks.append(ch[0]) + if not chunks: + # command completely failed to segment file.putting to normal encode + logger.info( + "Failed to break file {0} in chunks." + " Putting to normal encode queue".format(friendly_token) + ) + for profile in profiles: + if media.video_height and media.video_height < profile.resolution: + if profile.resolution not in settings.MINIMUM_RESOLUTIONS_TO_ENCODE: + continue + encoding = Encoding(media=media, profile=profile) + encoding.save() + enc_url = settings.SSL_FRONTEND_HOST + encoding.get_absolute_url() + encode_media.delay( + friendly_token, profile.id, encoding.id, enc_url, force=force + ) + return False + + chunks = [os.path.join(cwd, ch) for ch in chunks] + to_profiles = [] + chunks_dict = {} + # calculate once md5sums + for chunk in chunks: + cmd = ["md5sum", chunk] + stdout = run_command(cmd).get("out") + md5sum = stdout.strip().split()[0] + chunks_dict[chunk] = md5sum + + for profile in profiles: + if media.video_height and media.video_height < profile.resolution: + if profile.resolution not in settings.MINIMUM_RESOLUTIONS_TO_ENCODE: + continue + to_profiles.append(profile) + + for chunk in chunks: + encoding = Encoding( + media=media, + profile=profile, + chunk_file_path=chunk, + chunk=True, + chunks_info=json.dumps(chunks_dict), + md5sum=chunks_dict[chunk], + ) + + encoding.save() + enc_url = settings.SSL_FRONTEND_HOST + encoding.get_absolute_url() + if profile.resolution in settings.MINIMUM_RESOLUTIONS_TO_ENCODE: + priority = 0 + else: + priority = 9 + encode_media.apply_async( + args=[friendly_token, profile.id, encoding.id, enc_url], + kwargs={"force": force, "chunk": True, "chunk_file_path": chunk}, + priority=priority, + ) + + logger.info( + "got {0} chunks and will encode to {1} profiles".format( + len(chunks), to_profiles + ) + ) + return True + + +class EncodingTask(Task): + def on_failure(self, exc, task_id, args, kwargs, einfo): + # mainly used to run some post failure steps + # we get here if a task is revoked + try: + if hasattr(self, "encoding"): + self.encoding.status = "fail" + self.encoding.save(update_fields=["status"]) + kill_ffmpeg_process(self.encoding.temp_file) + if hasattr(self.encoding, "media"): + self.encoding.media.post_encode_actions() + 
except BaseException: + pass + return False + + +@task( + name="encode_media", + base=EncodingTask, + bind=True, + queue="long_tasks", + soft_time_limit=settings.CELERY_SOFT_TIME_LIMIT, +) +def encode_media( + self, + friendly_token, + profile_id, + encoding_id, + encoding_url, + force=True, + chunk=False, + chunk_file_path="", +): + """Encode a media to given profile, using ffmpeg, storing progress""" + + logger.info( + "Encode Media started, friendly token {0}, profile id {1}, force {2}".format( + friendly_token, profile_id, force + ) + ) + + if self.request.id: + task_id = self.request.id + else: + task_id = None + try: + media = Media.objects.get(friendly_token=friendly_token) + profile = EncodeProfile.objects.get(id=profile_id) + except BaseException: + Encoding.objects.filter(id=encoding_id).delete() + return False + + # break logic with chunk True/False + if chunk: + # TODO: in case a video is chunkized and this enters here many times + # it will always run since chunk_file_path is always different + # thus find a better way for this check + if ( + Encoding.objects.filter( + media=media, profile=profile, chunk_file_path=chunk_file_path + ).count() + > 1 + and force == False + ): + Encoding.objects.filter(id=encoding_id).delete() + return False + else: + try: + encoding = Encoding.objects.get(id=encoding_id) + encoding.status = "running" + Encoding.objects.filter( + media=media, + profile=profile, + chunk=True, + chunk_file_path=chunk_file_path, + ).exclude(id=encoding_id).delete() + except BaseException: + encoding = Encoding( + media=media, + profile=profile, + status="running", + chunk=True, + chunk_file_path=chunk_file_path, + ) + else: + if ( + Encoding.objects.filter(media=media, profile=profile).count() > 1 + and force is False + ): + Encoding.objects.filter(id=encoding_id).delete() + return False + else: + try: + encoding = Encoding.objects.get(id=encoding_id) + encoding.status = "running" + Encoding.objects.filter(media=media, profile=profile).exclude( + id=encoding_id + ).delete() + except BaseException: + encoding = Encoding(media=media, profile=profile, status="running") + + if task_id: + encoding.task_id = task_id + encoding.worker = "localhost" + encoding.retries = self.request.retries + encoding.save() + + if profile.extension == "gif": + tf = create_temp_file(suffix=".gif") + # -ss 5 start from 5 second. 
-t 25 until 25 sec + command = [ + settings.FFMPEG_COMMAND, + "-y", + "-ss", + "3", + "-i", + media.media_file.path, + "-hide_banner", + "-vf", + "scale=344:-1:flags=lanczos,fps=1", + "-t", + "25", + "-f", + "gif", + tf, + ] + ret = run_command(command) + if os.path.exists(tf) and get_file_type(tf) == "image": + with open(tf, "rb") as f: + myfile = File(f) + encoding.status = "success" + encoding.media_file.save(content=myfile, name=tf) + rm_file(tf) + return True + else: + return False + + if chunk: + original_media_path = chunk_file_path + else: + original_media_path = media.media_file.path + + if not media.duration: + encoding.status = "fail" + encoding.save(update_fields=["status"]) + return False + + with tempfile.TemporaryDirectory(dir=settings.TEMP_DIRECTORY) as temp_dir: + + tf = create_temp_file(suffix=".{0}".format(profile.extension), dir=temp_dir) + tfpass = create_temp_file(suffix=".{0}".format(profile.extension), dir=temp_dir) + ffmpeg_commands = produce_ffmpeg_commands( + original_media_path, + media.media_info, + resolution=profile.resolution, + codec=profile.codec, + output_filename=tf, + pass_file=tfpass, + chunk=chunk, + ) + if not ffmpeg_commands: + encoding.status = "fail" + encoding.save(update_fields=["status"]) + return False + + encoding.temp_file = tf + encoding.commands = str(ffmpeg_commands) + + encoding.save(update_fields=["temp_file", "commands", "task_id"]) + + # binding these, so they are available on on_failure + self.encoding = encoding + self.media = media + # can be one-pass or two-pass + for ffmpeg_command in ffmpeg_commands: + ffmpeg_command = [str(s) for s in ffmpeg_command] + encoding_backend = FFmpegBackend() + try: + encoding_command = encoding_backend.encode(ffmpeg_command) + duration, n_times = 0, 0 + output = "" + while encoding_command: + try: + # TODO: understand an eternal loop + # eg h265 with mv4 file issue, and stop with error + output = next(encoding_command) + duration = calculate_seconds(output) + if duration: + percent = duration * 100 / media.duration + if n_times % 60 == 0: + encoding.progress = percent + try: + encoding.save( + update_fields=["progress", "update_date"] + ) + logger.info("Saved {0}".format(round(percent, 2))) + except BaseException: + pass + n_times += 1 + except StopIteration: + break + except VideoEncodingError: + # ffmpeg error, or ffmpeg was killed + raise + except Exception as e: + try: + # output is empty, fail message is on the exception + output = e.message + except AttributeError: + output = "" + if isinstance(e, SoftTimeLimitExceeded): + kill_ffmpeg_process(encoding.temp_file) + encoding.logs = output + encoding.status = "fail" + encoding.save(update_fields=["status", "logs"]) + raise_exception = True + # if this is an ffmpeg's valid error + # no need for the task to be re-run + # otherwise rerun task... 
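The loop below only clears raise_exception when ffmpeg's output contains one of the known fatal messages from ERRORS_LIST, so permanent failures are not retried while anything else gets one more attempt. The same decision expressed as a standalone predicate (a sketch, not code from the task itself):

    def is_permanent_ffmpeg_failure(output, known_errors):
        # True when ffmpeg reported a known fatal error; retrying cannot help.
        output = (output or "").lower()
        return any(err.lower() in output for err in known_errors)

    KNOWN = ["Output file is empty, nothing was encoded"]
    assert is_permanent_ffmpeg_failure("Output file is empty, nothing was encoded", KNOWN)
    assert not is_permanent_ffmpeg_failure("Connection timed out", KNOWN)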
+ for error_msg in ERRORS_LIST: + if error_msg.lower() in output.lower(): + raise_exception = False + if raise_exception: + raise self.retry(exc=e, countdown=5, max_retries=1) + + encoding.logs = output + encoding.progress = 100 + + success = False + encoding.status = "fail" + if os.path.exists(tf) and os.path.getsize(tf) != 0: + ret = media_file_info(tf) + if ret.get("is_video") or ret.get("is_audio"): + encoding.status = "success" + success = True + + with open(tf, "rb") as f: + myfile = File(f) + output_name = "{0}.{1}".format( + get_file_name(original_media_path), profile.extension + ) + encoding.media_file.save(content=myfile, name=output_name) + encoding.total_run_time = ( + encoding.update_date - encoding.add_date + ).seconds + + try: + encoding.save( + update_fields=["status", "logs", "progress", "total_run_time"] + ) + # this will raise a django.db.utils.DatabaseError error when task is revoked, + # since we delete the encoding at that stage + except BaseException: + pass + + return success + + +@task(name="produce_sprite_from_video", queue="long_tasks") +def produce_sprite_from_video(friendly_token): + """Produces a sprites file for a video, uses ffmpeg""" + + try: + media = Media.objects.get(friendly_token=friendly_token) + except BaseException: + logger.info("failed to get media with friendly_token %s" % friendly_token) + return False + + with tempfile.TemporaryDirectory(dir=settings.TEMP_DIRECTORY) as tmpdirname: + try: + tmpdir_image_files = tmpdirname + "/img%03d.jpg" + output_name = tmpdirname + "/sprites.jpg" + cmd = "{0} -i {1} -f image2 -vf 'fps=1/10, scale=160:90' {2}&&files=$(ls {3}/img*.jpg | sort -t '-' -n -k 2 | tr '\n' ' ')&&convert $files -append {4}".format( + settings.FFMPEG_COMMAND, + media.media_file.path, + tmpdir_image_files, + tmpdirname, + output_name, + ) + ret = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True) + if os.path.exists(output_name) and get_file_type(output_name) == "image": + with open(output_name, "rb") as f: + myfile = File(f) + media.sprites.save( + content=myfile, + name=get_file_name(media.media_file.path) + "sprites.jpg", + ) + except BaseException: + pass + return True + + +@task(name="create_hls", queue="long_tasks") +def create_hls(friendly_token): + """Creates HLS file for media, uses Bento4 mp4hls command""" + + if not hasattr(settings, "MP4HLS_COMMAND"): + logger.info("Bento4 mp4hls command is missing from configuration") + return False + + if not os.path.exists(settings.MP4HLS_COMMAND): + logger.info("Bento4 mp4hls command is missing") + return False + + try: + media = Media.objects.get(friendly_token=friendly_token) + except BaseException: + logger.info("failed to get media with friendly_token %s" % friendly_token) + return False + + p = media.uid.hex + output_dir = os.path.join(settings.HLS_DIR, p) + encodings = media.encodings.filter( + profile__extension="mp4", status="success", chunk=False, profile__codec="h264" + ) + if encodings: + existing_output_dir = None + if os.path.exists(output_dir): + existing_output_dir = output_dir + output_dir = os.path.join(settings.HLS_DIR, p + produce_friendly_token()) + files = " ".join([f.media_file.path for f in encodings if f.media_file]) + cmd = "{0} --segment-duration=4 --output-dir={1} {2}".format( + settings.MP4HLS_COMMAND, output_dir, files + ) + ret = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True) + if existing_output_dir: + # override content with -T ! 
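cp -rT copies the contents of the new directory onto the existing one instead of nesting it inside, which lets a fresh HLS build replace files that are already being served. A portable sketch of the same swap using only the standard library (illustrative, not the project's code):

    import shutil

    def replace_dir_contents(new_dir, live_dir):
        # Copy the freshly generated playlist tree over the directory that
        # is already served, then drop the temporary build directory.
        shutil.copytree(new_dir, live_dir, dirs_exist_ok=True)  # Python 3.8+
        shutil.rmtree(new_dir)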
+ cmd = "cp -rT {0} {1}".format(output_dir, existing_output_dir) + ret = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True) + shutil.rmtree(output_dir) + output_dir = existing_output_dir + pp = os.path.join(output_dir, "master.m3u8") + if os.path.exists(pp): + if media.hls_file != pp: + media.hls_file = pp + media.save(update_fields=["hls_file"]) + return True + + +@task(name="check_running_states", queue="short_tasks") +def check_running_states(): + # Experimental - unused + """Check stale running encodings and delete/reencode media""" + + encodings = Encoding.objects.filter(status="running") + + logger.info("got {0} encodings that are in state running".format(encodings.count())) + changed = 0 + for encoding in encodings: + now = datetime.now(encoding.update_date.tzinfo) + if (now - encoding.update_date).seconds > settings.RUNNING_STATE_STALE: + media = encoding.media + profile = encoding.profile + task_id = encoding.task_id + # terminate task + if task_id: + revoke(task_id, terminate=True) + encoding.delete() + media.encode(profiles=[profile]) + # TODO: allign with new code + chunksize... + changed += 1 + if changed: + logger.info("changed from running to pending on {0} items".format(changed)) + return True + + +@task(name="check_media_states", queue="short_tasks") +def check_media_states(): + # Experimental - unused + # check encoding status of not success media + media = Media.objects.filter( + Q(encoding_status="running") + | Q(encoding_status="fail") + | Q(encoding_status="pending") + ) + + logger.info("got {0} media that are not in state success".format(media.count())) + + changed = 0 + for m in media: + m.set_encoding_status() + m.save(update_fields=["encoding_status"]) + changed += 1 + if changed: + logger.info("changed encoding status to {0} media items".format(changed)) + return True + + +@task(name="check_pending_states", queue="short_tasks") +def check_pending_states(): + # Experimental - unused + # check encoding profiles that are on state pending and not on a queue + encodings = Encoding.objects.filter(status="pending") + + if not encodings: + return True + + changed = 0 + tasks = list_tasks() + task_ids = tasks["task_ids"] + media_profile_pairs = tasks["media_profile_pairs"] + for encoding in encodings: + if encoding.task_id and encoding.task_id in task_ids: + # encoding is in one of the active/reserved/scheduled tasks list + continue + elif ( + encoding.media.friendly_token, + encoding.profile.id, + ) in media_profile_pairs: + continue + # encoding is in one of the reserved/scheduled tasks list. + # has no task_id but will be run, so need to re-enter the queue + else: + media = encoding.media + profile = encoding.profile + encoding.delete() + media.encode(profiles=[profile], force=False) + changed += 1 + if changed: + logger.info( + "set to the encode queue {0} encodings that were on pending state".format( + changed + ) + ) + return True + + +@task(name="check_missing_profiles", queue="short_tasks") +def check_missing_profiles(): + # Experimental - unused + + # check if video files have missing profiles. 
If so, add them + media = Media.objects.filter(media_type="video") + profiles = list(EncodeProfile.objects.all()) + + changed = 0 + + for m in media: + existing_profiles = [p.profile for p in m.encodings.all()] + missing_profiles = [p for p in profiles if p not in existing_profiles] + if missing_profiles: + m.encode(profiles=missing_profiles, force=False) + # since we call with force=False + # encode_media won't delete existing profiles + # if they appear on the meanwhile (eg on a big queue) + changed += 1 + if changed: + logger.info("set to the encode queue {0} profiles".format(changed)) + return True + + +@task(name="clear_sessions", queue="short_tasks") +def clear_sessions(): + """Clear expired sessions""" + + try: + from importlib import import_module + from django.conf import settings + + engine = import_module(settings.SESSION_ENGINE) + engine.SessionStore.clear_expired() + except BaseException: + return False + return True + + +@task(name="save_user_action", queue="short_tasks") +def save_user_action( + user_or_session, friendly_token=None, action="watch", extra_info=None +): + """Short task that saves a user action""" + + if action not in VALID_USER_ACTIONS: + return False + + try: + media = Media.objects.get(friendly_token=friendly_token) + except BaseException: + return False + + user = user_or_session.get("user_id") + session_key = user_or_session.get("user_session") + remote_ip = user_or_session.get("remote_ip_addr") + + if user: + try: + user = User.objects.get(id=user) + except BaseException: + return False + + if not (user or session_key): + return False + + if action in ["like", "dislike", "report"]: + if not pre_save_action( + media=media, + user=user, + session_key=session_key, + action=action, + remote_ip=remote_ip, + ): + return False + + if action == "watch": + if user: + MediaAction.objects.filter(user=user, media=media, action="watch").delete() + else: + MediaAction.objects.filter( + session_key=session_key, media=media, action="watch" + ).delete() + if action == "rate": + try: + score = extra_info.get("score") + rating_category = extra_info.get("category_id") + except BaseException: + # TODO: better error handling? + return False + try: + rating = Rating.objects.filter( + user=user, media=media, rating_category_id=rating_category + ).first() + if rating: + rating.score = score + rating.save(update_fields=["score"]) + else: + rating = Rating.objects.create( + user=user, + media=media, + rating_category_id=rating_category, + score=score, + ) + except Exception as exc: + # TODO: more specific handling, for errors in score, or + # rating_category? 
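One way to tighten the handling flagged in the TODO above would be to validate the client payload before touching the database at all; a hypothetical helper along those lines (matching the 0-5 range accepted by validate_rating):

    def parse_rating_payload(extra_info):
        # Return (score, category_id) when the payload is usable, else None.
        try:
            score = int(extra_info["score"])
            category_id = int(extra_info["category_id"])
        except (TypeError, KeyError, ValueError):
            return None
        if not 0 <= score <= 5:
            return None
        return score, category_id

    assert parse_rating_payload({"score": "4", "category_id": 2}) == (4, 2)
    assert parse_rating_payload({"score": 9, "category_id": 2}) is None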
+ return False + + ma = MediaAction( + user=user, + session_key=session_key, + media=media, + action=action, + extra_info=extra_info, + remote_ip=remote_ip, + ) + ma.save() + + if action == "watch": + media.views += 1 + media.save(update_fields=["views"]) + elif action == "report": + media.reported_times += 1 + + if media.reported_times >= settings.REPORTED_TIMES_THRESHOLD: + media.state = "private" + media.save(update_fields=["reported_times", "state"]) + + notify_users( + friendly_token=media.friendly_token, + action="media_reported", + extra=extra_info, + ) + elif action == "like": + media.likes += 1 + media.save(update_fields=["likes"]) + elif action == "dislike": + media.dislikes += 1 + media.save(update_fields=["dislikes"]) + + return True + + +@task(name="get_list_of_popular_media", queue="long_tasks") +def get_list_of_popular_media(): + """Experimental task for preparing media listing + for index page / recommended section + calculate and return the top 50 popular media, based on two rules + X = the top 25 videos that have the most views during the last week + Y = the most recent 25 videos that have been liked over the last 6 months + """ + + valid_media_x = {} + valid_media_y = {} + basic_query = Q(listable=True) + media_x = Media.objects.filter(basic_query).values("friendly_token") + + period_x = datetime.now() - timedelta(days=7) + period_y = datetime.now() - timedelta(days=30 * 6) + + for media in media_x: + ft = media["friendly_token"] + num = MediaAction.objects.filter( + action_date__gte=period_x, action="watch", media__friendly_token=ft + ).count() + if num: + valid_media_x[ft] = num + num = MediaAction.objects.filter( + action_date__gte=period_y, action="like", media__friendly_token=ft + ).count() + if num: + valid_media_y[ft] = num + + x = sorted(valid_media_x.items(), key=lambda kv: kv[1], reverse=True)[:25] + y = sorted(valid_media_y.items(), key=lambda kv: kv[1], reverse=True)[:25] + + media_ids = [a[0] for a in x] + media_ids.extend([a[0] for a in y]) + media_ids = list(set(media_ids)) + cache.set("popular_media_ids", media_ids, 60 * 60 * 12) + logger.info("saved popular media ids") + + return True + + +@task(name="update_listings_thumbnails", queue="long_tasks") +def update_listings_thumbnails(): + """Populate listings_thumbnail field for models""" + + # Categories + used_media = [] + saved = 0 + qs = Category.objects.filter().order_by("-media_count") + for object in qs: + media = ( + Media.objects.exclude(friendly_token__in=used_media) + .filter(category=object, state="public", is_reviewed=True) + .order_by("-views") + .first() + ) + if media: + object.listings_thumbnail = media.thumbnail_url + object.save(update_fields=["listings_thumbnail"]) + used_media.append(media.friendly_token) + saved += 1 + logger.info("updated {} categories".format(saved)) + + # Tags + used_media = [] + saved = 0 + qs = Tag.objects.filter().order_by("-media_count") + for object in qs: + media = ( + Media.objects.exclude(friendly_token__in=used_media) + .filter(tags=object, state="public", is_reviewed=True) + .order_by("-views") + .first() + ) + if media: + object.listings_thumbnail = media.thumbnail_url + object.save(update_fields=["listings_thumbnail"]) + used_media.append(media.friendly_token) + saved += 1 + logger.info("updated {} tags".format(saved)) + + return True + + +@task_revoked.connect +def task_sent_handler(sender=None, headers=None, body=None, **kwargs): + # For encode_media tasks that are revoked, + # ffmpeg command won't be stopped, since + # it got started by a subprocess. 
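kill_ffmpeg_process, defined further down, locates that orphaned process with a ps/grep/awk pipeline keyed on the temp file path. An alternative sketch that walks the process table with psutil (a third-party package that is not a MediaCMS dependency, shown only for illustration):

    import psutil  # third-party; not part of the project's requirements

    def kill_ffmpeg_for(filepath):
        # Terminate ffmpeg processes whose command line references the
        # temp file of the revoked encoding.
        killed = []
        for proc in psutil.process_iter(attrs=["pid", "name", "cmdline"]):
            cmdline = " ".join(proc.info.get("cmdline") or [])
            if proc.info.get("name") == "ffmpeg" and filepath in cmdline:
                try:
                    proc.kill()
                    killed.append(proc.info["pid"])
                except (psutil.NoSuchProcess, psutil.AccessDenied):
                    pass
        return killed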
+    # Need to stop that process
+    # Also, removing the Encoding object,
+    # since the task that would prepare it was killed
+    # Maybe add a killed state for Encoding objects
+    try:
+        uid = kwargs["request"].task_id
+        if uid:
+            encoding = Encoding.objects.get(task_id=uid)
+            encoding.delete()
+            logger.info("deleted the Encoding object")
+            if encoding.temp_file:
+                kill_ffmpeg_process(encoding.temp_file)
+
+    except BaseException:
+        pass
+
+    return True
+
+
+def kill_ffmpeg_process(filepath):
+    # this is not ideal, ffmpeg pid could be linked to the Encoding object
+    cmd = "ps aux|grep 'ffmpeg'|grep %s|grep -v grep |awk '{print $2}'" % filepath
+    result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
+    pid = result.stdout.decode("utf-8").strip()
+    if pid:
+        cmd = "kill -9 %s" % pid
+        result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
+    return result
+
+
+@task(name="remove_media_file", base=Task, queue="long_tasks")
+def remove_media_file(media_file=None):
+    rm_file(media_file)
+    return True
+
+
+# TODO LIST
+# 1 chunks are deleted from original server when file is fully encoded.
+# however need to enter this logic in cases of fail as well
+# 2 script to delete chunks in fail status
+# (and check for their encodings, and delete them as well, along with
+# all chunks)
+# 3 beat task, remove chunks
diff --git a/files/tests.py b/files/tests.py
new file mode 100644
index 0000000..e69de29
diff --git a/files/urls.py b/files/urls.py
new file mode 100644
index 0000000..561242c
--- /dev/null
+++ b/files/urls.py
@@ -0,0 +1,91 @@
+from django.conf.urls.static import static
+from django.conf import settings
+from django.conf.urls import url, include
+from django.urls import path
+
+from . import views
+from . import management_views
+from .feeds import RssMediaFeed
+
+urlpatterns = [
+    url(r"^$", views.index),
+    url(r"^about", views.about, name="about"),
+    url(r"^add_subtitle", views.add_subtitle, name="add_subtitle"),
+    url(r"^categories$", views.categories, name="categories"),
+    url(r"^contact$", views.contact, name="contact"),
+    url(r"^edit", views.edit_media, name="edit_media"),
+    url(r"^embed", views.embed_media, name="get_embed"),
+    url(r"^featured$", views.featured_media),
+    url(r"^fu/", include(("uploader.urls", "uploader"), namespace="uploader")),
+    url(r"^history$", views.history, name="history"),
+    url(r"^liked$", views.liked_media, name="liked_media"),
+    url(r"^latest$", views.latest_media),
+    url(r"^members", views.members, name="members"),
+    url(
+        r"^playlist/(?P<friendly_token>[\w]*)$",
+        views.view_playlist,
+        name="get_playlist",
+    ),
+    url(
+        r"^playlists/(?P<friendly_token>[\w]*)$",
+        views.view_playlist,
+        name="get_playlist",
+    ),
+    url(r"^popular$", views.recommended_media),
+    url(r"^recommended$", views.recommended_media),
+    path("rss/", RssMediaFeed()),
+    url(r"^search", views.search, name="search"),
+    url(r"^scpublisher", views.upload_media, name="upload_media"),
+    url(r"^tags", views.tags, name="tags"),
+    url(r"^tos$", views.tos, name="terms_of_service"),
+    url(r"^view", views.view_media, name="get_media"),
+    url(r"^upload", views.upload_media, name="upload_media"),
+    # API VIEWS
+    url(r"^api/v1/media$", views.MediaList.as_view()),
+    url(r"^api/v1/media/$", views.MediaList.as_view()),
+    url(
+        r"^api/v1/media/(?P<friendly_token>[\w]*)$",
+        views.MediaDetail.as_view(),
+        name="api_get_media",
+    ),
+    url(
+        r"^api/v1/media/encoding/(?P<encoding_id>[\w]*)$",
+        views.EncodingDetail.as_view(),
+        name="api_get_encoding",
+    ),
+    url(r"^api/v1/search$", views.MediaSearch.as_view()),
+    url(
+        r"^api/v1/media/(?P<friendly_token>[\w]*)/actions$",
+        views.MediaActions.as_view(),
+    ),
+    url(r"^api/v1/categories$", views.CategoryList.as_view()),
+    url(r"^api/v1/tags$", views.TagList.as_view()),
+    url(r"^api/v1/comments$", views.CommentList.as_view()),
+    url(
+        r"^api/v1/media/(?P<friendly_token>[\w]*)/comments$",
+        views.CommentDetail.as_view(),
+    ),
+    url(
+        r"^api/v1/media/(?P<friendly_token>[\w]*)/comments/(?P<uid>[\w-]*)$",
+        views.CommentDetail.as_view(),
+    ),
+    url(r"^api/v1/playlists$", views.PlaylistList.as_view()),
+    url(r"^api/v1/playlists/$", views.PlaylistList.as_view()),
+    url(
+        r"^api/v1/playlists/(?P<friendly_token>[\w]*)$",
+        views.PlaylistDetail.as_view(),
+        name="api_get_playlist",
+    ),
+    url(r"^api/v1/user/action/(?P<action>[\w]*)$", views.UserActions.as_view()),
+    # ADMIN VIEWS
+    url(r"^api/v1/encode_profiles/$", views.EncodeProfileList.as_view()),
+    url(r"^api/v1/manage_media$", management_views.MediaList.as_view()),
+    url(r"^api/v1/manage_comments$", management_views.CommentList.as_view()),
+    url(r"^api/v1/manage_users$", management_views.UserList.as_view()),
+    url(r"^api/v1/tasks$", views.TasksList.as_view()),
+    url(r"^api/v1/tasks/$", views.TasksList.as_view()),
+    url(r"^api/v1/tasks/(?P<uid>[\w|\W]*)$", views.TaskDetail.as_view()),
+    url(r"^manage/comments$", views.manage_comments, name="manage_comments"),
+    url(r"^manage/media$", views.manage_media, name="manage_media"),
+    url(r"^manage/users$", views.manage_users, name="manage_users"),
+] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
diff --git a/files/views.py b/files/views.py
new file mode 100644
index 0000000..3905ddf
--- /dev/null
+++ b/files/views.py
@@ -0,0 +1,1273 @@
+from django.shortcuts import render
+from django.http import HttpResponseRedirect
+from django.conf import settings
+from django.shortcuts import get_object_or_404
+from django.db.models import Q
+from django.contrib.auth.decorators import login_required
+from django.contrib import messages
+from django.template.defaultfilters import slugify
+from django.core.mail import EmailMessage
+from django.contrib.postgres.search import SearchQuery
+
+from rest_framework import permissions
+from rest_framework.views import APIView
+from rest_framework.response import Response
+from rest_framework.settings import api_settings
+from rest_framework.exceptions import PermissionDenied
+from rest_framework import status
+from rest_framework.parsers import (
+    JSONParser,
+    MultiPartParser,
+    FileUploadParser,
+    FormParser,
+)
+
+from celery.task.control import revoke
+from cms.permissions import IsAuthorizedToAdd, IsUserOrEditor
+from cms.permissions import user_allowed_to_upload
+from cms.custom_pagination import FastPaginationWithoutCount
+from actions.models import MediaAction, USER_MEDIA_ACTIONS
+from users.models import User
+from .helpers import produce_ffmpeg_commands, clean_query
+from .models import (
+    Media,
+    EncodeProfile,
+    Encoding,
+    Playlist,
+    PlaylistMedia,
+    Comment,
+    Category,
+    Tag,
+)
+from .forms import MediaForm, ContactForm, SubtitleForm
+from .tasks import save_user_action
+from .methods import (
+    list_tasks,
+    get_user_or_session,
+    show_recommended_media,
+    show_related_media,
+    is_mediacms_editor,
+    is_mediacms_manager,
+    update_user_ratings,
+    notify_user_on_comment,
+)
+from .serializers import (
+    MediaSerializer,
+    CategorySerializer,
+    TagSerializer,
+    SingleMediaSerializer,
+    EncodeProfileSerializer,
+    MediaSearchSerializer,
+    PlaylistSerializer,
+    PlaylistDetailSerializer,
+    CommentSerializer,
+)
+from .stop_words import STOP_WORDS
+
+VALID_USER_ACTIONS = [action for action, name in USER_MEDIA_ACTIONS]
+
+
+def 
about(request): + """About view""" + + context = {} + return render(request, "cms/about.html", context) + + +@login_required +def add_subtitle(request): + """Add subtitle view""" + + friendly_token = request.GET.get("m", "").strip() + if not friendly_token: + return HttpResponseRedirect("/") + media = Media.objects.filter(friendly_token=friendly_token).first() + if not media: + return HttpResponseRedirect("/") + + if not ( + request.user == media.user + or is_mediacms_editor(request.user) + or is_mediacms_manager(request.user) + ): + return HttpResponseRedirect("/") + + if request.method == "POST": + form = SubtitleForm(media, request.POST, request.FILES) + if form.is_valid(): + subtitle = form.save() + messages.add_message(request, messages.INFO, "Subtitle was added!") + return HttpResponseRedirect(subtitle.media.get_absolute_url()) + else: + form = SubtitleForm(media_item=media) + return render(request, "cms/add_subtitle.html", {"form": form}) + + +def categories(request): + """List categories view""" + + context = {} + return render(request, "cms/categories.html", context) + + +def contact(request): + """Contact view""" + + context = {} + if request.method == "GET": + form = ContactForm(request.user) + context["form"] = form + + else: + form = ContactForm(request.user, request.POST) + if form.is_valid(): + if request.user.is_authenticated: + from_email = request.user.email + name = request.user.name + else: + from_email = request.POST.get("from_email") + name = request.POST.get("name") + message = request.POST.get("message") + + title = "[{}] - Contact form message received".format(settings.PORTAL_NAME) + + msg = """ +You have received a message through the contact form\n +Sender name: %s +Sender email: %s\n +\n %s +""" % ( + name, + from_email, + message, + ) + email = EmailMessage( + title, + msg, + settings.DEFAULT_FROM_EMAIL, + settings.ADMIN_EMAIL_LIST, + reply_to=[from_email], + ) + email.send(fail_silently=True) + success_msg = "Message was sent! 
Thanks for contacting" + context["success_msg"] = success_msg + + return render(request, "cms/contact.html", context) + + +def history(request): + """Show personal history view""" + + context = {} + return render(request, "cms/history.html", context) + + +@login_required +def edit_media(request): + """Edit a media view""" + + friendly_token = request.GET.get("m", "").strip() + if not friendly_token: + return HttpResponseRedirect("/") + media = Media.objects.filter(friendly_token=friendly_token).first() + + if not media: + return HttpResponseRedirect("/") + + if not ( + request.user == media.user + or is_mediacms_editor(request.user) + or is_mediacms_manager(request.user) + ): + return HttpResponseRedirect("/") + if request.method == "POST": + form = MediaForm(request.user, request.POST, request.FILES, instance=media) + if form.is_valid(): + media = form.save() + for tag in media.tags.all(): + media.tags.remove(tag) + if form.cleaned_data.get("new_tags"): + for tag in form.cleaned_data.get("new_tags").split(","): + tag = slugify(tag) + if tag: + try: + tag = Tag.objects.get(title=tag) + except Tag.DoesNotExist: + tag = Tag.objects.create(title=tag, user=request.user) + if tag not in media.tags.all(): + media.tags.add(tag) + messages.add_message(request, messages.INFO, "Media was edited!") + return HttpResponseRedirect(media.get_absolute_url()) + else: + form = MediaForm(request.user, instance=media) + return render( + request, + "cms/edit_media.html", + {"form": form, "add_subtitle_url": media.add_subtitle_url}, + ) + + +def embed_media(request): + """Embed media view""" + + friendly_token = request.GET.get("m", "").strip() + if not friendly_token: + return HttpResponseRedirect("/") + + media = Media.objects.values("title").filter(friendly_token=friendly_token).first() + + if not media: + return HttpResponseRedirect("/") + + user_or_session = get_user_or_session(request) + + context = {} + context["media"] = friendly_token + return render(request, "cms/embed.html", context) + + +def featured_media(request): + """List featured media view""" + + context = {} + return render(request, "cms/featured-media.html", context) + + +def index(request): + """Index view""" + + context = {} + return render(request, "cms/index.html", context) + + +def latest_media(request): + """List latest media view""" + + context = {} + return render(request, "cms/latest-media.html", context) + + +def liked_media(request): + """List user's liked media view""" + + context = {} + return render(request, "cms/liked_media.html", context) + + +@login_required +def manage_users(request): + """List users management view""" + + context = {} + return render(request, "cms/manage_users.html", context) + + +@login_required +def manage_media(request): + """List media management view""" + + context = {} + return render(request, "cms/manage_media.html", context) + + +@login_required +def manage_comments(request): + """List comments management view""" + + context = {} + return render(request, "cms/manage_comments.html", context) + + +def members(request): + """List members view""" + + context = {} + return render(request, "cms/members.html", context) + + +def recommended_media(request): + """List recommended media view""" + + context = {} + return render(request, "cms/recommended-media.html", context) + + +def search(request): + """Search view""" + + context = {} + return render(request, "cms/search.html", context) + + +def tags(request): + """List tags view""" + + context = {} + return render(request, "cms/tags.html", context) + + +def 
tos(request): + """Terms of service view""" + + context = {} + return render(request, "cms/tos.html", context) + + +def upload_media(request): + """Upload media view""" + + from allauth.account.forms import LoginForm + + form = LoginForm() + context = {} + context["form"] = form + context["can_add"] = user_allowed_to_upload(request) + can_upload_exp = settings.CANNOT_ADD_MEDIA_MESSAGE + context["can_upload_exp"] = can_upload_exp + + return render(request, "cms/add-media.html", context) + + +def view_media(request): + """View media view""" + + friendly_token = request.GET.get("m", "").strip() + context = {} + media = Media.objects.filter(friendly_token=friendly_token).first() + if not media: + context["media"] = None + return render(request, "cms/media.html", context) + + user_or_session = get_user_or_session(request) + save_user_action.delay( + user_or_session, friendly_token=friendly_token, action="watch" + ) + context = {} + context["media"] = friendly_token + context["media_object"] = media + + context["CAN_DELETE_MEDIA"] = False + context["CAN_EDIT_MEDIA"] = False + context["CAN_DELETE_COMMENTS"] = False + + if request.user.is_authenticated: + if ( + (media.user.id == request.user.id) + or is_mediacms_editor(request.user) + or is_mediacms_manager(request.user) + ): + context["CAN_DELETE_MEDIA"] = True + context["CAN_EDIT_MEDIA"] = True + context["CAN_DELETE_COMMENTS"] = True + return render(request, "cms/media.html", context) + + +def view_playlist(request, friendly_token): + """View playlist view""" + + try: + playlist = Playlist.objects.get(friendly_token=friendly_token) + except BaseException: + playlist = None + + context = {} + context["playlist"] = playlist + return render(request, "cms/playlist.html", context) + + +class MediaList(APIView): + """Media listings views""" + + permission_classes = (IsAuthorizedToAdd,) + parser_classes = (JSONParser, MultiPartParser, FormParser, FileUploadParser) + + def get(self, request, format=None): + # Show media + params = self.request.query_params + show_param = params.get("show", "") + + author_param = params.get("author", "").strip() + if author_param: + user_queryset = User.objects.all() + user = get_object_or_404(user_queryset, username=author_param) + if show_param == "recommended": + pagination_class = FastPaginationWithoutCount + media = show_recommended_media(request, limit=50) + else: + pagination_class = api_settings.DEFAULT_PAGINATION_CLASS + if author_param: + # in case request.user is the user here, show + # all media independant of state + if self.request.user == user: + basic_query = Q(user=user) + else: + basic_query = Q(listable=True, user=user) + else: + # base listings should show safe content + basic_query = Q(listable=True) + + if show_param == "featured": + media = Media.objects.filter(basic_query, featured=True) + else: + media = Media.objects.filter(basic_query).order_by("-add_date") + + paginator = pagination_class() + + if show_param != "recommended": + media = media.prefetch_related("user") + page = paginator.paginate_queryset(media, request) + + serializer = MediaSerializer(page, many=True, context={"request": request}) + return paginator.get_paginated_response(serializer.data) + + def post(self, request, format=None): + # Add new media + serializer = MediaSerializer(data=request.data, context={"request": request}) + if serializer.is_valid(): + media_file = request.data["media_file"] + serializer.save(user=request.user, media_file=media_file) + return Response(serializer.data, status=status.HTTP_201_CREATED) + 
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + +class MediaDetail(APIView): + """ + Retrieve, update or delete a media instance. + """ + + permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsUserOrEditor) + parser_classes = (JSONParser, MultiPartParser, FormParser, FileUploadParser) + + def get_object(self, friendly_token, password=None): + try: + media = ( + Media.objects.select_related("user") + .prefetch_related("encodings__profile") + .get(friendly_token=friendly_token) + ) + + # this need be explicitly called, and will call + # has_object_permission() after has_permission has succeeded + self.check_object_permissions(self.request, media) + + if media.state == "private" and not ( + self.request.user == media.user or is_mediacms_editor(self.request.user) + ): + if ( + (not password) + or (not media.password) + or (password != media.password) + ): + return Response( + {"detail": "media is private"}, + status=status.HTTP_401_UNAUTHORIZED, + ) + return media + except PermissionDenied: + return Response( + {"detail": "bad permissions"}, status=status.HTTP_401_UNAUTHORIZED + ) + except BaseException: + return Response( + {"detail": "media file does not exist"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + def get(self, request, friendly_token, format=None): + # Get media details + password = request.GET.get("password") + media = self.get_object(friendly_token, password=password) + if isinstance(media, Response): + return media + + serializer = SingleMediaSerializer(media, context={"request": request}) + if media.state == "private": + related_media = [] + else: + related_media = show_related_media(media, request=request, limit=100) + related_media_serializer = MediaSerializer( + related_media, many=True, context={"request": request} + ) + related_media = related_media_serializer.data + ret = serializer.data + + # update rattings info with user specific ratings + # eg user has already rated for this media + # this only affects user rating and only if enabled + if ( + settings.ALLOW_RATINGS + and ret.get("ratings_info") + and not request.user.is_anonymous + ): + ret["ratings_info"] = update_user_ratings( + request.user, media, ret.get("ratings_info") + ) + + ret["related_media"] = related_media + return Response(ret) + + def post(self, request, friendly_token, format=None): + """superuser actions + Available only to MediaCMS editors and managers + + Action is a POST variable, review and encode are implemented + """ + + media = self.get_object(friendly_token) + if isinstance(media, Response): + return media + + if not (is_mediacms_editor(request.user) or is_mediacms_manager(request.user)): + return Response( + {"detail": "not allowed"}, status=status.HTTP_400_BAD_REQUEST + ) + + action = request.data.get("type") + profiles_list = request.data.get("encoding_profiles") + result = request.data.get("result", True) + + if action == "encode": + # Create encoding tasks for specific profiles + valid_profiles = [] + if profiles_list: + if isinstance(profiles_list, list): + for p in profiles_list: + p = EncodeProfile.objects.filter(id=p).first() + if p: + valid_profiles.append(p) + elif isinstance(profiles_list, str): + try: + p = EncodeProfile.objects.filter(id=int(profiles_list)).first() + valid_profiles.append(p) + except ValueError: + return Response( + { + "detail": "encoding_profiles must be int or list of ints of valid encode profiles" + }, + status=status.HTTP_400_BAD_REQUEST, + ) + media.encode(profiles=valid_profiles) + return Response( + {"detail": "media 
will be encoded"}, status=status.HTTP_201_CREATED + ) + elif action == "review": + if result: + media.is_reviewed = True + elif result == False: + media.is_reviewed = False + media.save(update_fields=["is_reviewed"]) + return Response( + {"detail": "media reviewed set"}, status=status.HTTP_201_CREATED + ) + return Response( + {"detail": "not valid action or no action specified"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + def put(self, request, friendly_token, format=None): + # Update a media object + media = self.get_object(friendly_token) + if isinstance(media, Response): + return media + + serializer = MediaSerializer( + media, data=request.data, context={"request": request} + ) + if serializer.is_valid(): + media_file = request.data["media_file"] + serializer.save(user=request.user, media_file=media_file) + return Response(serializer.data, status=status.HTTP_201_CREATED) + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + def delete(self, request, friendly_token, format=None): + # Delete a media object + media = self.get_object(friendly_token) + if isinstance(media, Response): + return media + media.delete() + return Response(status=status.HTTP_204_NO_CONTENT) + + +class MediaActions(APIView): + """ + Retrieve, update or delete a media action instance. + """ + + permission_classes = (permissions.AllowAny,) + parser_classes = (JSONParser,) + + def get_object(self, friendly_token): + try: + media = ( + Media.objects.select_related("user") + .prefetch_related("encodings__profile") + .get(friendly_token=friendly_token) + ) + if media.state == "private" and self.request.user != media.user: + return Response( + {"detail": "media is private"}, status=status.HTTP_400_BAD_REQUEST + ) + return media + except PermissionDenied: + return Response( + {"detail": "bad permissions"}, status=status.HTTP_400_BAD_REQUEST + ) + except BaseException: + return Response( + {"detail": "media file does not exist"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + def get(self, request, friendly_token, format=None): + # show date and reason for each time media was reported + media = self.get_object(friendly_token) + if isinstance(media, Response): + return media + + ret = {} + reported = MediaAction.objects.filter(media=media, action="report") + ret["reported"] = [] + for rep in reported: + item = {"reported_date": rep.action_date, "reason": rep.extra_info} + ret["reported"].append(item) + + return Response(ret, status=status.HTTP_200_OK) + + def post(self, request, friendly_token, format=None): + # perform like/dislike/report actions + media = self.get_object(friendly_token) + if isinstance(media, Response): + return media + + action = request.data.get("type") + extra = request.data.get("extra_info") + if request.user.is_anonymous: + # there is a list of allowed actions for + # anonymous users, specified in settings + if action not in settings.ALLOW_ANONYMOUS_ACTIONS: + return Response( + {"detail": "action allowed on logged in users only"}, + status=status.HTTP_400_BAD_REQUEST, + ) + if action: + user_or_session = get_user_or_session(request) + save_user_action.delay( + user_or_session, + friendly_token=media.friendly_token, + action=action, + extra_info=extra, + ) + + return Response( + {"detail": "action received"}, status=status.HTTP_201_CREATED + ) + else: + return Response( + {"detail": "no action specified"}, status=status.HTTP_400_BAD_REQUEST + ) + + def delete(self, request, friendly_token, format=None): + media = self.get_object(friendly_token) + if isinstance(media, 
Response): + return media + + if not request.user.is_superuser: + return Response( + {"detail": "not allowed"}, status=status.HTTP_400_BAD_REQUEST + ) + + action = request.data.get("type") + if action: + if action == "report": # delete reported actions + MediaAction.objects.filter(media=media, action="report").delete() + media.reported_times = 0 + media.save(update_fields=["reported_times"]) + return Response( + {"detail": "reset reported times counter"}, + status=status.HTTP_201_CREATED, + ) + else: + return Response( + {"detail": "no action specified"}, status=status.HTTP_400_BAD_REQUEST + ) + + +class MediaSearch(APIView): + """ + Retrieve results for searc + Only GET is implemented here + """ + + parser_classes = (JSONParser,) + + def get(self, request, format=None): + params = self.request.query_params + query = params.get("q", "").strip().lower() + category = params.get("c", "").strip() + tag = params.get("t", "").strip() + + ordering = params.get("ordering", "").strip() + sort_by = params.get("sort_by", "").strip() + media_type = params.get("media_type", "").strip() + + author = params.get("author", "").strip() + + sort_by_options = ["title", "add_date", "edit_date", "views", "likes"] + if sort_by not in sort_by_options: + sort_by = "add_date" + if ordering == "asc": + ordering = "" + else: + ordering = "-" + + if media_type not in ["video", "image", "audio", "pdf"]: + media_type = None + + if not (query or category or tag): + ret = {} + return Response(ret, status=status.HTTP_200_OK) + + media = Media.objects.filter(state="public", is_reviewed=True) + + if query: + query = clean_query(query) + q_parts = [q_part for q_part in query.split() if q_part not in STOP_WORDS] + if q_parts: + query = SearchQuery(q_parts[0] + ":*", search_type="raw") + for part in q_parts[1:]: + query &= SearchQuery(part + ":*", search_type="raw") + else: + query = None + if query: + media = media.filter(search=query) + + if tag: + media = media.filter(tags__title=tag) + + if category: + media = media.filter(category__title__contains=category) + + if media_type: + media = media.filter(media_type=media_type) + + if author: + media = media.filter(user__username=author) + + media = media.order_by(f"{ordering}{sort_by}") + + if self.request.query_params.get("show", "").strip() == "titles": + media = media.values("title")[:40] + return Response(media, status=status.HTTP_200_OK) + else: + media = media.prefetch_related("user") + if category or tag: + pagination_class = api_settings.DEFAULT_PAGINATION_CLASS + else: + # pagination_class = FastPaginationWithoutCount + pagination_class = api_settings.DEFAULT_PAGINATION_CLASS + paginator = pagination_class() + page = paginator.paginate_queryset(media, request) + serializer = MediaSearchSerializer( + page, many=True, context={"request": request} + ) + return paginator.get_paginated_response(serializer.data) + + +class PlaylistList(APIView): + """Playlists listings and creation views""" + + permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsAuthorizedToAdd) + parser_classes = (JSONParser, MultiPartParser, FormParser, FileUploadParser) + + def get(self, request, format=None): + pagination_class = api_settings.DEFAULT_PAGINATION_CLASS + paginator = pagination_class() + playlists = Playlist.objects.filter().prefetch_related("user") + + if "author" in self.request.query_params: + author = self.request.query_params["author"].strip() + playlists = playlists.filter(user__username=author) + + page = paginator.paginate_queryset(playlists, request) + + serializer = 
PlaylistSerializer(page, many=True, context={"request": request}) + return paginator.get_paginated_response(serializer.data) + + def post(self, request, format=None): + serializer = PlaylistSerializer(data=request.data, context={"request": request}) + if serializer.is_valid(): + serializer.save(user=request.user) + return Response(serializer.data, status=status.HTTP_201_CREATED) + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + +class PlaylistDetail(APIView): + """Playlist related views""" + + permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsUserOrEditor) + parser_classes = (JSONParser, MultiPartParser, FormParser, FileUploadParser) + + def get_playlist(self, friendly_token): + try: + playlist = Playlist.objects.get(friendly_token=friendly_token) + self.check_object_permissions(self.request, playlist) + return playlist + except PermissionDenied: + return Response( + {"detail": "not enough permissions"}, status=status.HTTP_400_BAD_REQUEST + ) + except BaseException: + return Response( + {"detail": "Playlist does not exist"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + def get(self, request, friendly_token, format=None): + playlist = self.get_playlist(friendly_token) + if isinstance(playlist, Response): + return playlist + + serializer = PlaylistDetailSerializer(playlist, context={"request": request}) + + playlist_media = PlaylistMedia.objects.filter( + playlist=playlist + ).prefetch_related("media__user") + + playlist_media = [c.media for c in playlist_media] + playlist_media_serializer = MediaSerializer( + playlist_media, many=True, context={"request": request} + ) + ret = serializer.data + ret["playlist_media"] = playlist_media_serializer.data + + return Response(ret) + + def post(self, request, friendly_token, format=None): + playlist = self.get_playlist(friendly_token) + if isinstance(playlist, Response): + return playlist + serializer = PlaylistDetailSerializer( + playlist, data=request.data, context={"request": request} + ) + if serializer.is_valid(): + serializer.save(user=request.user) + return Response(serializer.data, status=status.HTTP_201_CREATED) + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + def put(self, request, friendly_token, format=None): + playlist = self.get_playlist(friendly_token) + if isinstance(playlist, Response): + return playlist + action = request.data.get("type") + media_friendly_token = request.data.get("media_friendly_token") + ordering = 0 + if request.data.get("ordering"): + try: + ordering = int(request.data.get("ordering")) + except ValueError: + pass + + if action in ["add", "remove", "ordering"]: + media = Media.objects.filter( + friendly_token=media_friendly_token, state="public", media_type="video" + ).first() + if media: + if action == "add": + media_in_playlist = PlaylistMedia.objects.filter( + playlist=playlist + ).count() + if media_in_playlist >= settings.MAX_MEDIA_PER_PLAYLIST: + return Response( + {"detail": "max number of media for a Playlist reached"}, + status=status.HTTP_400_BAD_REQUEST, + ) + else: + obj, created = PlaylistMedia.objects.get_or_create( + playlist=playlist, + media=media, + ordering=media_in_playlist + 1, + ) + obj.save() + return Response( + {"detail": "media added to Playlist"}, + status=status.HTTP_201_CREATED, + ) + elif action == "remove": + PlaylistMedia.objects.filter( + playlist=playlist, media=media + ).delete() + return Response( + {"detail": "media removed from Playlist"}, + status=status.HTTP_201_CREATED, + ) + elif action == "ordering": + 
if ordering: + playlist.set_ordering(media, ordering) + return Response( + {"detail": "new ordering set"}, + status=status.HTTP_201_CREATED, + ) + else: + return Response( + {"detail": "media is not valid"}, status=status.HTTP_400_BAD_REQUEST + ) + return Response( + {"detail": "invalid or not specified action"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + def delete(self, request, friendly_token, format=None): + playlist = self.get_playlist(friendly_token) + if isinstance(playlist, Response): + return playlist + + playlist.delete() + return Response(status=status.HTTP_204_NO_CONTENT) + + +class EncodingDetail(APIView): + """Experimental. This View is used by remote workers + Needs heavy testing and documentation. + """ + + permission_classes = (permissions.IsAdminUser,) + parser_classes = (JSONParser, MultiPartParser, FormParser, FileUploadParser) + + def post(self, request, encoding_id): + ret = {} + force = request.data.get("force", False) + task_id = request.data.get("task_id", False) + action = request.data.get("action", "") + chunk = request.data.get("chunk", False) + chunk_file_path = request.data.get("chunk_file_path", "") + + encoding_status = request.data.get("status", "") + progress = request.data.get("progress", "") + commands = request.data.get("commands", "") + logs = request.data.get("logs", "") + retries = request.data.get("retries", "") + worker = request.data.get("worker", "") + temp_file = request.data.get("temp_file", "") + total_run_time = request.data.get("total_run_time", "") + if action == "start": + try: + encoding = Encoding.objects.get(id=encoding_id) + media = encoding.media + profile = encoding.profile + except BaseException: + Encoding.objects.filter(id=encoding_id).delete() + return Response({"status": "fail"}, status=status.HTTP_400_BAD_REQUEST) + # TODO: break chunk True/False logic here + if ( + Encoding.objects.filter( + media=media, + profile=profile, + chunk=chunk, + chunk_file_path=chunk_file_path, + ).count() + > 1 + and force == False + ): + Encoding.objects.filter(id=encoding_id).delete() + return Response({"status": "fail"}, status=status.HTTP_400_BAD_REQUEST) + else: + Encoding.objects.filter( + media=media, + profile=profile, + chunk=chunk, + chunk_file_path=chunk_file_path, + ).exclude(id=encoding.id).delete() + + encoding.status = "running" + if task_id: + encoding.task_id = task_id + + encoding.save() + if chunk: + original_media_path = chunk_file_path + original_media_md5sum = encoding.md5sum + original_media_url = ( + settings.SSL_FRONTEND_HOST + encoding.media_chunk_url + ) + else: + original_media_path = media.media_file.path + original_media_md5sum = media.md5sum + original_media_url = ( + settings.SSL_FRONTEND_HOST + media.original_media_url + ) + + ret["original_media_url"] = original_media_url + ret["original_media_path"] = original_media_path + ret["original_media_md5sum"] = original_media_md5sum + + # generating the commands here, and will replace these with temporary + # files created on the remote server + tf = "TEMP_FILE_REPLACE" + tfpass = "TEMP_FPASS_FILE_REPLACE" + ffmpeg_commands = produce_ffmpeg_commands( + original_media_path, + media.media_info, + resolution=profile.resolution, + codec=profile.codec, + output_filename=tf, + pass_file=tfpass, + chunk=chunk, + ) + if not ffmpeg_commands: + encoding.delete() + return Response({"status": "fail"}, status=status.HTTP_400_BAD_REQUEST) + + ret["duration"] = media.duration + ret["ffmpeg_commands"] = ffmpeg_commands + ret["profile_extension"] = profile.extension + return 
Response(ret, status=status.HTTP_201_CREATED) + elif action == "update_fields": + try: + encoding = Encoding.objects.get(id=encoding_id) + except BaseException: + return Response({"status": "fail"}, status=status.HTTP_400_BAD_REQUEST) + to_update = ["size", "update_date"] + if encoding_status: + encoding.status = encoding_status + to_update.append("status") + if progress: + encoding.progress = progress + to_update.append("progress") + if logs: + encoding.logs = logs + to_update.append("logs") + if commands: + encoding.commands = commands + to_update.append("commands") + if task_id: + encoding.task_id = task_id + to_update.append("task_id") + if total_run_time: + encoding.total_run_time = total_run_time + to_update.append("total_run_time") + if worker: + encoding.worker = worker + to_update.append("worker") + if temp_file: + encoding.temp_file = temp_file + to_update.append("temp_file") + + if retries: + encoding.retries = retries + to_update.append("retries") + + try: + encoding.save(update_fields=to_update) + except BaseException: + return Response({"status": "fail"}, status=status.HTTP_400_BAD_REQUEST) + return Response({"status": "success"}, status=status.HTTP_201_CREATED) + + def put(self, request, encoding_id, format=None): + encoding_file = request.data["file"] + encoding = Encoding.objects.filter(id=encoding_id).first() + if not encoding: + return Response( + {"detail": "encoding does not exist"}, + status=status.HTTP_400_BAD_REQUEST, + ) + encoding.media_file = encoding_file + encoding.save() + return Response({"detail": "ok"}, status=status.HTTP_201_CREATED) + + +class CommentList(APIView): + permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsAuthorizedToAdd) + parser_classes = (JSONParser, MultiPartParser, FormParser, FileUploadParser) + + def get(self, request, format=None): + pagination_class = api_settings.DEFAULT_PAGINATION_CLASS + paginator = pagination_class() + comments = Comment.objects.filter() + comments = comments.prefetch_related("user") + comments = comments.prefetch_related("media") + params = self.request.query_params + if "author" in params: + author_param = params["author"].strip() + user_queryset = User.objects.all() + user = get_object_or_404(user_queryset, username=author_param) + comments = comments.filter(user=user) + + page = paginator.paginate_queryset(comments, request) + + serializer = CommentSerializer(page, many=True, context={"request": request}) + return paginator.get_paginated_response(serializer.data) + + +class CommentDetail(APIView): + """Comments related views + Listings of comments for a media (GET) + Create comment (POST) + Delete comment (DELETE) + """ + + permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsUserOrEditor) + parser_classes = (JSONParser, MultiPartParser, FormParser, FileUploadParser) + + def get_object(self, friendly_token): + try: + media = Media.objects.select_related("user").get( + friendly_token=friendly_token + ) + self.check_object_permissions(self.request, media) + if media.state == "private" and self.request.user != media.user: + return Response( + {"detail": "media is private"}, status=status.HTTP_400_BAD_REQUEST + ) + return media + except PermissionDenied: + return Response( + {"detail": "bad permissions"}, status=status.HTTP_400_BAD_REQUEST + ) + except BaseException: + return Response( + {"detail": "media file does not exist"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + def get(self, request, friendly_token): + # list comments for a media + media = self.get_object(friendly_token) + if 
isinstance(media, Response): + return media + comments = media.comments.filter().prefetch_related("user") + pagination_class = api_settings.DEFAULT_PAGINATION_CLASS + paginator = pagination_class() + page = paginator.paginate_queryset(comments, request) + serializer = CommentSerializer(page, many=True, context={"request": request}) + return paginator.get_paginated_response(serializer.data) + + def delete(self, request, friendly_token, uid=None): + """Delete a comment + Administrators, MediaCMS editors and managers, + media owner, and comment owners, can delete a comment + """ + if uid: + try: + comment = Comment.objects.get(uid=uid) + except BaseException: + return Response( + {"detail": "comment does not exist"}, + status=status.HTTP_400_BAD_REQUEST, + ) + if ( + (comment.user == self.request.user) + or comment.media.user == self.request.user + or is_mediacms_editor(self.request.user) + ): + comment.delete() + else: + return Response( + {"detail": "bad permissions"}, status=status.HTTP_400_BAD_REQUEST + ) + return Response(status=status.HTTP_204_NO_CONTENT) + + def post(self, request, friendly_token): + """Create a comment""" + media = self.get_object(friendly_token) + if isinstance(media, Response): + return media + + if not media.enable_comments: + return Response( + {"detail": "comments not allowed here"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + serializer = CommentSerializer(data=request.data, context={"request": request}) + if serializer.is_valid(): + serializer.save(user=request.user, media=media) + if request.user != media.user: + notify_user_on_comment(friendly_token=media.friendly_token) + return Response(serializer.data, status=status.HTTP_201_CREATED) + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + +class UserActions(APIView): + parser_classes = (JSONParser,) + + def get(self, request, action): + media = [] + if action in VALID_USER_ACTIONS: + if request.user.is_authenticated: + media = ( + Media.objects.select_related("user") + .filter( + mediaactions__user=request.user, mediaactions__action=action + ) + .order_by("-mediaactions__action_date") + ) + elif request.session.session_key: + media = ( + Media.objects.select_related("user") + .filter( + mediaactions__session_key=request.session.session_key, + mediaactions__action=action, + ) + .order_by("-mediaactions__action_date") + ) + + pagination_class = api_settings.DEFAULT_PAGINATION_CLASS + paginator = pagination_class() + page = paginator.paginate_queryset(media, request) + serializer = MediaSerializer(page, many=True, context={"request": request}) + return paginator.get_paginated_response(serializer.data) + + +class CategoryList(APIView): + """List categories""" + + def get(self, request, format=None): + categories = Category.objects.filter().order_by("title") + serializer = CategorySerializer( + categories, many=True, context={"request": request} + ) + ret = serializer.data + return Response(ret) + + +class TagList(APIView): + """List tags""" + + def get(self, request, format=None): + tags = Tag.objects.filter().order_by("-media_count") + pagination_class = api_settings.DEFAULT_PAGINATION_CLASS + paginator = pagination_class() + page = paginator.paginate_queryset(tags, request) + serializer = TagSerializer(page, many=True, context={"request": request}) + return paginator.get_paginated_response(serializer.data) + + +class EncodeProfileList(APIView): + """List encode profiles""" + + def get(self, request, format=None): + profiles = EncodeProfile.objects.all() + serializer = 
EncodeProfileSerializer( + profiles, many=True, context={"request": request} + ) + return Response(serializer.data) + + +class TasksList(APIView): + """List tasks""" + + permission_classes = (permissions.IsAdminUser,) + + def get(self, request, format=None): + ret = list_tasks() + return Response(ret) + + +class TaskDetail(APIView): + """Cancel a task""" + + permission_classes = (permissions.IsAdminUser,) + + def delete(self, request, uid, format=None): + revoke(uid, terminate=True) + return Response(status=status.HTTP_204_NO_CONTENT) diff --git a/fixtures/categories.json b/fixtures/categories.json new file mode 100644 index 0000000..23dbac5 --- /dev/null +++ b/fixtures/categories.json @@ -0,0 +1 @@ +[{"model": "files.category", "pk": 1, "fields": {"uid": "6a376886-4fdb-4d68-a4f3-b2c978fa6b08", "add_date": "2020-04-11T18:06:32.397Z", "title": "Art", "description": "", "user": null, "is_global": false, "media_count": 0, "thumbnail": ""}}, {"model": "files.category", "pk": 2, "fields": {"uid": "3067680e-b3d9-4e8e-8d55-a868e9f2b8a5", "add_date": "2020-04-11T18:06:36.768Z", "title": "Documentary", "description": "", "user": null, "is_global": false, "media_count": 0, "thumbnail": ""}}, {"model": "files.category", "pk": 3, "fields": {"uid": "3fb841f8-2baa-4b92-890a-8ca7bcd3fa40", "add_date": "2020-04-11T18:06:42.009Z", "title": "Experimental", "description": "", "user": null, "is_global": false, "media_count": 0, "thumbnail": ""}}, {"model": "files.category", "pk": 4, "fields": {"uid": "b7a1a749-a13e-489a-adf8-ee1c514b1677", "add_date": "2020-04-11T18:06:52.826Z", "title": "Film", "description": "", "user": null, "is_global": false, "media_count": 0, "thumbnail": ""}}, {"model": "files.category", "pk": 5, "fields": {"uid": "0073814e-a4dd-42a6-a5a8-9d219606be6b", "add_date": "2020-04-11T18:06:57.486Z", "title": "Music", "description": "", "user": null, "is_global": false, "media_count": 0, "thumbnail": ""}}, {"model": "files.category", "pk": 6, "fields": {"uid": "38534d33-7116-4ce9-9d96-9cde60744b9a", "add_date": "2020-04-11T18:07:05.455Z", "title": "TV", "description": "", "user": null, "is_global": false, "media_count": 0, "thumbnail": ""}}] diff --git a/fixtures/encoding_profiles.json b/fixtures/encoding_profiles.json new file mode 100644 index 0000000..c983103 --- /dev/null +++ b/fixtures/encoding_profiles.json @@ -0,0 +1 @@ +[{"model": "files.encodeprofile", "pk": 19, "fields": {"name": "h264-2160", "extension": "mp4", "resolution": 2160, "codec": "h264", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 2, "fields": {"name": "vp9-2160", "extension": "webm", "resolution": 2160, "codec": "vp9", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 16, "fields": {"name": "h265-2160", "extension": "mp4", "resolution": 2160, "codec": "h265", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 4, "fields": {"name": "h264-1440", "extension": "mp4", "resolution": 1440, "codec": "h264", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 5, "fields": {"name": "vp9-1440", "extension": "webm", "resolution": 1440, "codec": "vp9", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 6, "fields": {"name": "h265-1440", "extension": "mp4", "resolution": 1440, "codec": "h265", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 7, "fields": {"name": "h264-1080", "extension": "mp4", "resolution": 1080, "codec": "h264", "description": "", "active": true}}, 
{"model": "files.encodeprofile", "pk": 8, "fields": {"name": "vp9-1080", "extension": "webm", "resolution": 1080, "codec": "vp9", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 9, "fields": {"name": "h265-1080", "extension": "mp4", "resolution": 1080, "codec": "h265", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 10, "fields": {"name": "h264-720", "extension": "mp4", "resolution": 720, "codec": "h264", "description": "", "active": true}}, {"model": "files.encodeprofile", "pk": 11, "fields": {"name": "vp9-720", "extension": "webm", "resolution": 720, "codec": "vp9", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 12, "fields": {"name": "h265-720", "extension": "mp4", "resolution": 720, "codec": "h265", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 13, "fields": {"name": "h264-480", "extension": "mp4", "resolution": 480, "codec": "h264", "description": "", "active": true}}, {"model": "files.encodeprofile", "pk": 14, "fields": {"name": "vp9-480", "extension": "webm", "resolution": 480, "codec": "vp9", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 15, "fields": {"name": "h265-480", "extension": "mp4", "resolution": 480, "codec": "h265", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 3, "fields": {"name": "h264-360", "extension": "mp4", "resolution": 360, "codec": "h264", "description": "", "active": true}}, {"model": "files.encodeprofile", "pk": 17, "fields": {"name": "vp9-360", "extension": "webm", "resolution": 360, "codec": "vp9", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 18, "fields": {"name": "h265-360", "extension": "mp4", "resolution": 360, "codec": "h265", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 2, "fields": {"name": "h264-240", "extension": "mp4", "resolution": 240, "codec": "h264", "description": "", "active": true}}, {"model": "files.encodeprofile", "pk": 20, "fields": {"name": "vp9-240", "extension": "webm", "resolution": 240, "codec": "vp9", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 21, "fields": {"name": "h265-240", "extension": "mp4", "resolution": 240, "codec": "h265", "description": "", "active": false}}, {"model": "files.encodeprofile", "pk": 1, "fields": {"name": "preview", "extension": "gif", "resolution": null, "codec": null, "description": "", "active": true}}] diff --git a/install.sh b/install.sh new file mode 100644 index 0000000..38caa4b --- /dev/null +++ b/install.sh @@ -0,0 +1,123 @@ +#!/bin/bash +# should be run as root and only on Ubuntu 18/20 versions! +echo "Welcome to the MediacMS installation!"; + +if [ `id -u` -ne 0 ] + then echo "Please run as root" + exit +fi + + +while true; do + read -p " +This script will attempt to perform a system update, install required dependencies, install and configure PostgreSQL, NGINX, Redis and a few other utilities. +It is expected to run on a new system **with no running instances of any these services**. Make sure you check the script before you continue. 
Then enter yes or no +" yn + case $yn in + [Yy]* ) echo "OK!"; break;; + [Nn]* ) echo "Have a great day"; exit;; + * ) echo "Please answer yes or no.";; + esac +done + + +if [[ `lsb_release -d` == *"Ubuntu 20"* ]]; then + echo 'Performing system update and dependency installation, this will take a few minutes' + apt-get update && apt-get -y upgrade && apt install python3-venv python3-dev virtualenv redis-server postgresql nginx git gcc vim unzip ffmpeg imagemagick python3-certbot-nginx certbot -y +elif [[ `lsb_release -d` = *"Ubuntu 18"* ]]; then + echo 'Performing system update and dependency installation, this will take a few minutes' + apt-get update && apt-get -y upgrade && apt install python3-venv python3-dev virtualenv redis-server postgresql nginx git gcc vim unzip ffmpeg imagemagick python3-certbot-nginx certbot -y +else + echo "This script is tested for Ubuntu 18 and 20 versions only, if you want to try MediaCMS on another system you have to perform the manual installation" +exit +fi + +read -p "Enter portal URL, or press enter for localhost : " FRONTEND_HOST +read -p "Enter portal name, or press enter for 'MediaCMS : " PORTAL_NAME + +[ -z "$PORTAL_NAME" ] && PORTAL_NAME='MediaCMS' +[ -z "$FRONTEND_HOST" ] && FRONTEND_HOST='localhost' + +echo 'Creating database to be used in MediaCMS' + +su -c "psql -c \"CREATE DATABASE mediacms\"" postgres +su -c "psql -c \"CREATE USER mediacms WITH ENCRYPTED PASSWORD 'mediacms'\"" postgres +su -c "psql -c \"GRANT ALL PRIVILEGES ON DATABASE mediacms TO mediacms\"" postgres + +echo 'Creating python virtualenv on /home/mediacms.io' + +cd /home/mediacms.io +virtualenv . --python=python3 +source /home/mediacms.io/bin/activate +cd mediacms +pip install -r requirements.txt + +SECRET_KEY=`python -c 'from django.core.management.utils import get_random_secret_key; print(get_random_secret_key())'` + +# remove http or https prefix +FRONTEND_HOST=`echo "$FRONTEND_HOST" | sed -r 's/http:\/\///g'` +FRONTEND_HOST=`echo "$FRONTEND_HOST" | sed -r 's/https:\/\///g'` + +sed -i s/localhost/$FRONTEND_HOST/g deploy/mediacms.io + + +echo 'FRONTEND_HOST='\'"$FRONTEND_HOST"\' >> cms/local_settings.py +echo 'PORTAL_NAME='\'"$PORTAL_NAME"\' >> cms/local_settings.py +echo "SSL_FRONTEND_HOST = FRONTEND_HOST.replace('http', 'https')" >> cms/local_settings.py + +echo 'SECRET_KEY='\'"$SECRET_KEY"\' >> cms/local_settings.py + +mkdir logs +mkdir pids +python manage.py migrate +python manage.py loaddata fixtures/encoding_profiles.json +python manage.py loaddata fixtures/categories.json +python manage.py collectstatic --noinput + +ADMIN_PASS=`python -c "import secrets;chars = 'abcdefghijklmnopqrstuvwxyz0123456789';print(''.join(secrets.choice(chars) for i in range(10)))"` +echo "from users.models import User; User.objects.create_superuser('admin', 'admin@example.com', '$ADMIN_PASS')" | python manage.py shell + +echo "from django.contrib.sites.models import Site; Site.objects.update(name='$FRONTEND_HOST', domain='$FRONTEND_HOST')" | python manage.py shell + +chown -R www-data. 
/home/mediacms.io/ +cp deploy/celery_long.service /etc/systemd/system/celery_long.service && systemctl enable celery_long && systemctl start celery_long +cp deploy/celery_short.service /etc/systemd/system/celery_short.service && systemctl enable celery_short && systemctl start celery_short +cp deploy/celery_beat.service /etc/systemd/system/celery_beat.service && systemctl enable celery_beat &&systemctl start celery_beat +cp deploy/mediacms.service /etc/systemd/system/mediacms.service && systemctl enable mediacms.service && systemctl start mediacms.service + +mkdir -p /etc/letsencrypt/live/mediacms.io/ +mkdir -p /etc/letsencrypt/live/$FRONTEND_HOST +cp deploy/mediacms.io_fullchain.pem /etc/letsencrypt/live/$FRONTEND_HOST/fullchain.pem +cp deploy/mediacms.io_privkey.pem /etc/letsencrypt/live/$FRONTEND_HOST/privkey.pem +cp deploy/mediacms.io /etc/nginx/sites-available/default +cp deploy/mediacms.io /etc/nginx/sites-enabled/default +cp deploy/uwsgi_params /etc/nginx/sites-enabled/uwsgi_params +cp deploy/nginx.conf /etc/nginx/ +systemctl stop nginx +systemctl start nginx + +# attempt to get a valid certificate for specified domain + +if [ "$FRONTEND_HOST" != "localhost" ]; then + echo 'attempt to get a valid certificate for specified url $FRONTEND_HOST' + certbot --nginx -n --agree-tos --register-unsafely-without-email -d $FRONTEND_HOST + certbot --nginx -n --agree-tos --register-unsafely-without-email -d $FRONTEND_HOST + # unfortunately for some reason it needs to be run two times in order to create the entries + # and directory structure!!! + systemctl restart nginx +else + echo "will not call certbot utility to update ssl certificate for url 'localhost', using default ssl certificate" +fi + + +# Bento4 utility installation, for HLS + +cd /home/mediacms.io/mediacms +wget http://zebulon.bok.net/Bento4/binaries/Bento4-SDK-1-6-0-632.x86_64-unknown-linux.zip +unzip Bento4-SDK-1-6-0-632.x86_64-unknown-linux.zip +mkdir /home/mediacms.io/mediacms/media_files/hls + +# last, set default owner +chown -R www-data. /home/mediacms.io/ + +echo 'MediaCMS installation completed, open browser on http://'"$FRONTEND_HOST"' and login with user admin and password '"$ADMIN_PASS"'' diff --git a/manage.py b/manage.py new file mode 100755 index 0000000..cf1a63a --- /dev/null +++ b/manage.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python +import os +import sys + +if __name__ == "__main__": + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cms.settings") + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + "Couldn't import Django. Are you sure it's installed and " + "available on your PYTHONPATH environment variable? Did you " + "forget to activate a virtual environment?" + ) from exc + execute_from_command_line(sys.argv) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..1f2523b --- /dev/null +++ b/requirements.txt @@ -0,0 +1,32 @@ +Django==3.1.4 +djangorestframework==3.12.2 +django-allauth==0.44.0 + +psycopg2-binary==2.8.6 + +uwsgi==2.0.19.1 + +django-redis==4.12.1 +celery==4.4.7 + +Pillow==8.0.1 +django-imagekit +markdown +django-filter +filetype +django-mptt +django-crispy-forms +requests==2.25.0 +django-celery-email +m3u8 + +django-ckeditor + +# extra nice utilities! 
+rpdb +tqdm +ipython +flake8 +pep8 +django-silk +django-debug-toolbar diff --git a/uploader/__init__.py b/uploader/__init__.py new file mode 100644 index 0000000..3e2f46a --- /dev/null +++ b/uploader/__init__.py @@ -0,0 +1 @@ +__version__ = "0.9.0" diff --git a/uploader/apps.py b/uploader/apps.py new file mode 100644 index 0000000..72a7575 --- /dev/null +++ b/uploader/apps.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 +from django.apps import AppConfig + + +class UploaderConfig(AppConfig): + name = "uploader" diff --git a/uploader/fineuploader.py b/uploader/fineuploader.py new file mode 100644 index 0000000..fd1bfae --- /dev/null +++ b/uploader/fineuploader.py @@ -0,0 +1,96 @@ +from os.path import join +from io import StringIO +import shutil +from django.conf import settings + +from . import utils + + +class BaseFineUploader(object): + def __init__(self, data, *args, **kwargs): + self.data = data + self.total_filesize = data.get("qqtotalfilesize") + self.filename = data.get("qqfilename") + self.uuid = data.get("qquuid") + self.file = data.get("qqfile") + self.storage_class = settings.FILE_STORAGE + self.real_path = None + + @property + def finished(self): + return self.real_path is not None + + @property + def file_path(self): + return join(settings.UPLOAD_DIR, self.uuid) + + @property + def _full_file_path(self): + return join(self.file_path, self.filename) + + @property + def storage(self): + file_storage = utils.import_class(self.storage_class) + return file_storage() + + @property + def url(self): + if not self.finished: + return None + return self.storage.url(self.real_path) + + +class ChunkedFineUploader(BaseFineUploader): + concurrent = True + + def __init__(self, data, concurrent=True, *args, **kwargs): + super(ChunkedFineUploader, self).__init__(data, *args, **kwargs) + self.concurrent = concurrent + self.total_parts = data.get("qqtotalparts") + if not isinstance(self.total_parts, int): + self.total_parts = 1 + self.part_index = data.get("qqpartindex") + + @property + def chunks_path(self): + return join(settings.CHUNKS_DIR, self.uuid) + + @property + def _abs_chunks_path(self): + return join(settings.MEDIA_ROOT, self.chunks_path) + + @property + def chunk_file(self): + return join(self.chunks_path, str(self.part_index)) + + @property + def chunked(self): + return self.total_parts > 1 + + @property + def is_time_to_combine_chunks(self): + return self.total_parts - 1 == self.part_index + + def combine_chunks(self): + # implement the same behaviour. 
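+        # The storage backend first saves an empty placeholder at the final
+        # path (UPLOAD_DIR/<uuid>/<filename>); that file is then reopened in
+        # binary mode and every chunk (CHUNKS_DIR/<uuid>/0 .. total_parts - 1)
+        # is appended in index order, after which the chunks directory is
+        # removed.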
+ self.real_path = self.storage.save(self._full_file_path, StringIO()) + with self.storage.open(self.real_path, "wb") as final_file: + for i in range(self.total_parts): + part = join(self.chunks_path, str(i)) + with self.storage.open(part, "rb") as source: + final_file.write(source.read()) + shutil.rmtree(self._abs_chunks_path) + + def _save_chunk(self): + return self.storage.save(self.chunk_file, self.file) + + def save(self): + if self.chunked: + chunk = self._save_chunk() + if not self.concurrent and self.is_time_to_combine_chunks: + self.combine_chunks() + return self.real_path + return chunk + else: + self.real_path = self.storage.save(self._full_file_path, self.file) + return self.real_path diff --git a/uploader/forms.py b/uploader/forms.py new file mode 100644 index 0000000..e89175d --- /dev/null +++ b/uploader/forms.py @@ -0,0 +1,19 @@ +from django import forms + + +class FineUploaderUploadForm(forms.Form): + qqfile = forms.FileField() + qquuid = forms.CharField() + qqfilename = forms.CharField() + qqpartindex = forms.IntegerField(required=False) + qqchunksize = forms.IntegerField(required=False) + qqtotalparts = forms.IntegerField(required=False) + qqtotalfilesize = forms.IntegerField(required=False) + qqpartbyteoffset = forms.IntegerField(required=False) + + +class FineUploaderUploadSuccessForm(forms.Form): + qquuid = forms.CharField() + qqfilename = forms.CharField() + qqtotalparts = forms.IntegerField() + qqtotalfilesize = forms.IntegerField(required=False) diff --git a/uploader/models.py b/uploader/models.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/uploader/models.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/uploader/urls.py b/uploader/urls.py new file mode 100644 index 0000000..2e0d06a --- /dev/null +++ b/uploader/urls.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +from django.conf.urls import url + +from . 
import views + +app_name = "uploader" + +urlpatterns = [ + url(r"^upload/$", views.FineUploaderView.as_view(), name="upload"), +] diff --git a/uploader/utils.py b/uploader/utils.py new file mode 100644 index 0000000..f752c96 --- /dev/null +++ b/uploader/utils.py @@ -0,0 +1,22 @@ +from django.core.exceptions import ImproperlyConfigured +from importlib import import_module + + +def import_class(path): + path_bits = path.split(".") + + if len(path_bits) < 2: + message = "'{0}' is not a complete Python path.".format(path) + raise ImproperlyConfigured(message) + + class_name = path_bits.pop() + module_path = ".".join(path_bits) + module_itself = import_module(module_path) + + if not hasattr(module_itself, class_name): + message = "The Python module '{}' has no '{}' class.".format( + module_path, class_name + ) + raise ImportError(message) + + return getattr(module_itself, class_name) diff --git a/uploader/views.py b/uploader/views.py new file mode 100644 index 0000000..93b2da1 --- /dev/null +++ b/uploader/views.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +import os +import shutil + +from django.http import JsonResponse +from django.views import generic +from django.conf import settings +from django.core.files import File +from django.core.exceptions import PermissionDenied + +from cms.permissions import user_allowed_to_upload +from files.models import Media +from files.helpers import rm_file +from .forms import FineUploaderUploadForm, FineUploaderUploadSuccessForm +from .fineuploader import ChunkedFineUploader + + +class FineUploaderView(generic.FormView): + http_method_names = ("post",) + form_class_upload = FineUploaderUploadForm + form_class_upload_success = FineUploaderUploadSuccessForm + + @property + def concurrent(self): + return settings.CONCURRENT_UPLOADS + + @property + def chunks_done(self): + return self.chunks_done_param_name in self.request.GET + + @property + def chunks_done_param_name(self): + return settings.CHUNKS_DONE_PARAM_NAME + + def make_response(self, data, **kwargs): + return JsonResponse(data, **kwargs) + + def get_form(self, form_class=None): + if self.chunks_done: + form_class = self.form_class_upload_success + else: + form_class = self.form_class_upload + return form_class(**self.get_form_kwargs()) + + def dispatch(self, request, *args, **kwargs): + if not user_allowed_to_upload(request): + raise PermissionDenied # HTTP 403 + return super(FineUploaderView, self).dispatch(request, *args, **kwargs) + + def form_valid(self, form): + self.upload = ChunkedFineUploader(form.cleaned_data, self.concurrent) + if self.upload.concurrent and self.chunks_done: + try: + self.upload.combine_chunks() + except FileNotFoundError: + data = {"success": False, "error": "Error with File Uploading"} + return self.make_response(data, status=400) + elif self.upload.total_parts == 1: + self.upload.save() + else: + self.upload.save() + return self.make_response({"success": True}) + # create media! 
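+        # The upload is fully assembled at this point: wrap the file in a
+        # Django File object, create the Media entry for the requesting user,
+        # then remove the temporary upload file and directory.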
+ media_file = os.path.join(settings.MEDIA_ROOT, self.upload.real_path) + with open(media_file, "rb") as f: + myfile = File(f) + new = Media.objects.create(media_file=myfile, user=self.request.user) + rm_file(media_file) + shutil.rmtree(os.path.join(settings.MEDIA_ROOT, self.upload.file_path)) + return self.make_response( + {"success": True, "media_url": new.get_absolute_url()} + ) + + def form_invalid(self, form): + data = {"success": False, "error": "%s" % repr(form.errors)} + return self.make_response(data, status=400) diff --git a/users/__init__.py b/users/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/adapter.py b/users/adapter.py new file mode 100644 index 0000000..0e35a99 --- /dev/null +++ b/users/adapter.py @@ -0,0 +1,22 @@ +from django.urls import reverse +from django.conf import settings +from allauth.account.adapter import DefaultAccountAdapter +from django.core.exceptions import ValidationError + + +class MyAccountAdapter(DefaultAccountAdapter): + def get_email_confirmation_url_stub(self, request, emailconfirmation): + url = reverse("account_confirm_email", args=[emailconfirmation.key]) + return settings.SSL_FRONTEND_HOST + url + + def clean_email(self, email): + if email.split("@")[1] in settings.RESTRICTED_DOMAINS_FOR_USER_REGISTRATION: + raise ValidationError("Domain is restricted from registering") + return email + + def is_open_for_signup(self, request): + return settings.USERS_CAN_SELF_REGISTER + + def send_mail(self, template_prefix, email, context): + msg = self.render_mail(template_prefix, email, context) + msg.send(fail_silently=True) diff --git a/users/admin.py b/users/admin.py new file mode 100644 index 0000000..04c453b --- /dev/null +++ b/users/admin.py @@ -0,0 +1,38 @@ +from django.contrib import admin + +from .models import User + + +class UserAdmin(admin.ModelAdmin): + search_fields = ["email", "username", "name"] + exclude = ( + "user_permissions", + "title", + "password", + "groups", + "last_login", + "is_featured", + "location", + "first_name", + "last_name", + "media_count", + "date_joined", + "is_staff", + "is_active", + ) + list_display = [ + "username", + "name", + "email", + "logo", + "date_added", + "is_superuser", + "is_editor", + "is_manager", + "media_count", + ] + list_filter = ["is_superuser", "is_editor", "is_manager"] + ordering = ("-date_added",) + + +admin.site.register(User, UserAdmin) diff --git a/users/apps.py b/users/apps.py new file mode 100644 index 0000000..3ef1284 --- /dev/null +++ b/users/apps.py @@ -0,0 +1,5 @@ +from django.apps import AppConfig + + +class UsersConfig(AppConfig): + name = "users" diff --git a/users/forms.py b/users/forms.py new file mode 100644 index 0000000..1113ba4 --- /dev/null +++ b/users/forms.py @@ -0,0 +1,59 @@ +from django import forms +from .models import User, Channel + + +class SignupForm(forms.Form): + name = forms.CharField(max_length=100, label="Name") + + def signup(self, request, user): + user.name = self.cleaned_data["name"] + user.save() + + +class UserForm(forms.ModelForm): + class Meta: + model = User + fields = ( + "name", + "description", + "email", + "logo", + "notification_on_comments", + "is_featured", + "advancedUser", + "is_manager", + "is_editor", + #"allow_contact", + ) + + def clean_logo(self): + image = self.cleaned_data.get("logo", False) + if image: + if image.size > 2 * 1024 * 1024: + raise forms.ValidationError("Image file too large ( > 2mb )") + return image + else: + raise forms.ValidationError("Please provide a logo") + + def __init__(self, user, *args, 
**kwargs): + super(UserForm, self).__init__(*args, **kwargs) + self.fields.pop("is_featured") + if not user.is_superuser: + self.fields.pop("advancedUser") + self.fields.pop("is_manager") + self.fields.pop("is_editor") + + +class ChannelForm(forms.ModelForm): + class Meta: + model = Channel + fields = ("banner_logo",) + + def clean_banner_logo(self): + image = self.cleaned_data.get("banner_logo", False) + if image: + if image.size > 2 * 1024 * 1024: + raise forms.ValidationError("Image file too large ( > 2mb )") + return image + else: + raise forms.ValidationError("Please provide a banner") diff --git a/users/migrations/0001_initial.py b/users/migrations/0001_initial.py new file mode 100644 index 0000000..e577d9f --- /dev/null +++ b/users/migrations/0001_initial.py @@ -0,0 +1,283 @@ +# Generated by Django 3.1.4 on 2020-12-01 07:12 + +from django.conf import settings +import django.contrib.auth.models +import django.contrib.auth.validators +from django.db import migrations, models +import django.db.models.deletion +import django.utils.timezone +import imagekit.models.fields + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ("auth", "0012_alter_user_first_name_max_length"), + ] + + operations = [ + migrations.CreateModel( + name="User", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("password", models.CharField(max_length=128, verbose_name="password")), + ( + "last_login", + models.DateTimeField( + blank=True, null=True, verbose_name="last login" + ), + ), + ( + "is_superuser", + models.BooleanField( + default=False, + help_text="Designates that this user has all permissions without explicitly assigning them.", + verbose_name="superuser status", + ), + ), + ( + "username", + models.CharField( + error_messages={ + "unique": "A user with that username already exists." + }, + help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.", + max_length=150, + unique=True, + validators=[ + django.contrib.auth.validators.UnicodeUsernameValidator() + ], + verbose_name="username", + ), + ), + ( + "first_name", + models.CharField( + blank=True, max_length=150, verbose_name="first name" + ), + ), + ( + "last_name", + models.CharField( + blank=True, max_length=150, verbose_name="last name" + ), + ), + ( + "email", + models.EmailField( + blank=True, max_length=254, verbose_name="email address" + ), + ), + ( + "is_staff", + models.BooleanField( + default=False, + help_text="Designates whether the user can log into this admin site.", + verbose_name="staff status", + ), + ), + ( + "is_active", + models.BooleanField( + default=True, + help_text="Designates whether this user should be treated as active. 
Unselect this instead of deleting accounts.", + verbose_name="active", + ), + ), + ( + "date_joined", + models.DateTimeField( + default=django.utils.timezone.now, verbose_name="date joined" + ), + ), + ( + "logo", + imagekit.models.fields.ProcessedImageField( + blank=True, + default="userlogos/user.jpg", + upload_to="userlogos/%Y/%m/%d", + ), + ), + ("description", models.TextField(blank=True, verbose_name="About me")), + ( + "name", + models.CharField( + db_index=True, max_length=250, verbose_name="full name" + ), + ), + ( + "date_added", + models.DateTimeField( + db_index=True, + default=django.utils.timezone.now, + verbose_name="date added", + ), + ), + ( + "is_featured", + models.BooleanField( + db_index=True, default=False, verbose_name="Is featured" + ), + ), + ( + "title", + models.CharField(blank=True, max_length=250, verbose_name="Title"), + ), + ( + "advancedUser", + models.BooleanField( + db_index=True, default=False, verbose_name="advanced user" + ), + ), + ("media_count", models.IntegerField(default=0)), + ( + "notification_on_comments", + models.BooleanField( + default=True, + verbose_name="Whether you will receive email notifications for comments added to your content", + ), + ), + ( + "allow_contact", + models.BooleanField( + default=False, + verbose_name="Whether allow contact will be shown on profile page", + ), + ), + ( + "location", + models.CharField( + blank=True, max_length=250, verbose_name="Location" + ), + ), + ( + "is_editor", + models.BooleanField( + db_index=True, default=False, verbose_name="MediaCMS Editor" + ), + ), + ( + "is_manager", + models.BooleanField( + db_index=True, default=False, verbose_name="MediaCMS Manager" + ), + ), + ( + "groups", + models.ManyToManyField( + blank=True, + help_text="The groups this user belongs to. 
A user will get all permissions granted to each of their groups.", + related_name="user_set", + related_query_name="user", + to="auth.Group", + verbose_name="groups", + ), + ), + ( + "user_permissions", + models.ManyToManyField( + blank=True, + help_text="Specific permissions for this user.", + related_name="user_set", + related_query_name="user", + to="auth.Permission", + verbose_name="user permissions", + ), + ), + ], + options={ + "ordering": ["-date_added", "name"], + }, + managers=[ + ("objects", django.contrib.auth.models.UserManager()), + ], + ), + migrations.CreateModel( + name="Notification", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("action", models.CharField(blank=True, max_length=30)), + ("notify", models.BooleanField(default=False)), + ( + "method", + models.CharField( + choices=[("email", "Email")], default="email", max_length=20 + ), + ), + ( + "user", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="notifications", + to=settings.AUTH_USER_MODEL, + ), + ), + ], + ), + migrations.CreateModel( + name="Channel", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("title", models.CharField(db_index=True, max_length=90)), + ("description", models.TextField(blank=True, help_text="description")), + ("add_date", models.DateTimeField(auto_now_add=True, db_index=True)), + ("friendly_token", models.CharField(blank=True, max_length=12)), + ( + "banner_logo", + imagekit.models.fields.ProcessedImageField( + blank=True, + default="userlogos/banner.jpg", + upload_to="userlogos/%Y/%m/%d", + ), + ), + ( + "subscribers", + models.ManyToManyField( + blank=True, + related_name="subscriptions", + to=settings.AUTH_USER_MODEL, + ), + ), + ( + "user", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="channels", + to=settings.AUTH_USER_MODEL, + ), + ), + ], + ), + migrations.AddIndex( + model_name="user", + index=models.Index( + fields=["-date_added", "name"], name="users_user_date_ad_4eb0b8_idx" + ), + ), + ] diff --git a/users/migrations/__init__.py b/users/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/models.py b/users/models.py new file mode 100644 index 0000000..dd087ad --- /dev/null +++ b/users/models.py @@ -0,0 +1,220 @@ +from django.db import models +from django.conf import settings +from django.contrib.auth.models import AbstractUser +from django.utils import timezone +from django.urls import reverse +from django.dispatch import receiver +from django.db.models.signals import post_save, post_delete +from django.utils.html import strip_tags +from django.core.mail import EmailMessage + +from imagekit.processors import ResizeToFill +from imagekit.models import ProcessedImageField + +import files.helpers as helpers +from files.models import Media, Tag, Category + + +class User(AbstractUser): + logo = ProcessedImageField( + upload_to="userlogos/%Y/%m/%d", + processors=[ResizeToFill(200, 200)], + default="userlogos/user.jpg", + format="JPEG", + options={"quality": 75}, + blank=True, + ) + description = models.TextField("About me", blank=True) + + name = models.CharField("full name", max_length=250, db_index=True) + date_added = models.DateTimeField("date added", default=timezone.now, db_index=True) + is_featured = models.BooleanField("Is featured", default=False, db_index=True) + + title = models.CharField("Title", 
max_length=250, blank=True) + advancedUser = models.BooleanField("advanced user", default=False, db_index=True) + media_count = models.IntegerField(default=0) # save number of videos + notification_on_comments = models.BooleanField( + "Whether you will receive email notifications for comments added to your content", + default=True, + ) + location = models.CharField("Location", max_length=250, blank=True) + is_editor = models.BooleanField("MediaCMS Editor", default=False, db_index=True) + is_manager = models.BooleanField("MediaCMS Manager", default=False, db_index=True) + allow_contact = models.BooleanField( + "Whether allow contact will be shown on profile page", default=False + ) + + class Meta: + ordering = ["-date_added", "name"] + indexes = [models.Index(fields=["-date_added", "name"])] + + def update_user_media(self): + self.media_count = Media.objects.filter(listable=True, user=self).count() + self.save(update_fields=["media_count"]) + return True + + def thumbnail_url(self): + if self.logo: + return helpers.url_from_path(self.logo.path) + return None + + def banner_thumbnail_url(self): + c = self.channels.filter().order_by("add_date").first() + if c: + return helpers.url_from_path(c.banner_logo.path) + return None + + @property + def email_is_verified(self): + if self.emailaddress_set.first(): + if self.emailaddress_set.first().verified: + return True + return False + + def get_absolute_url(self, api=False): + if api: + return reverse("api_get_user", kwargs={"username": self.username}) + else: + return reverse("get_user", kwargs={"username": self.username}) + + def edit_url(self): + return reverse("edit_user", kwargs={"username": self.username}) + + def default_channel_edit_url(self): + c = self.channels.filter().order_by("add_date").first() + if c: + return reverse("edit_channel", kwargs={"friendly_token": c.friendly_token}) + return None + + @property + def playlists_info(self): + ret = [] + for playlist in self.playlists.all(): + c = {} + c["title"] = playlist.title + c["description"] = playlist.description + c["media_count"] = playlist.media_count + c["add_date"] = playlist.add_date + c["url"] = playlist.get_absolute_url() + ret.append(c) + return ret + + @property + def media_info(self): + ret = {} + results = [] + ret["results"] = results + ret["user_media"] = "/api/v1/media?author={0}".format(self.username) + return ret + + def save(self, *args, **kwargs): + strip_text_items = ["name", "description", "title"] + for item in strip_text_items: + setattr(self, item, strip_tags(getattr(self, item, None))) + super(User, self).save(*args, **kwargs) + + +class Channel(models.Model): + title = models.CharField(max_length=90, db_index=True) + description = models.TextField(blank=True, help_text="description") + user = models.ForeignKey( + User, on_delete=models.CASCADE, db_index=True, related_name="channels" + ) + add_date = models.DateTimeField(auto_now_add=True, db_index=True) + subscribers = models.ManyToManyField(User, related_name="subscriptions", blank=True) + friendly_token = models.CharField(blank=True, max_length=12) + banner_logo = ProcessedImageField( + upload_to="userlogos/%Y/%m/%d", + processors=[ResizeToFill(900, 200)], + default="userlogos/banner.jpg", + format="JPEG", + options={"quality": 85}, + blank=True, + ) + + def save(self, *args, **kwargs): + strip_text_items = ["description", "title"] + for item in strip_text_items: + setattr(self, item, strip_tags(getattr(self, item, None))) + + if not self.friendly_token: + while True: + friendly_token = 
helpers.produce_friendly_token() + if not Channel.objects.filter(friendly_token=friendly_token): + self.friendly_token = friendly_token + break + super(Channel, self).save(*args, **kwargs) + + def __str__(self): + return "{0} -{1}".format(self.user.username, self.title) + + def get_absolute_url(self, edit=False): + if edit: + return reverse( + "edit_channel", kwargs={"friendly_token": self.friendly_token} + ) + else: + return reverse( + "view_channel", kwargs={"friendly_token": self.friendly_token} + ) + + @property + def edit_url(self): + return self.get_absolute_url(edit=True) + + +@receiver(post_save, sender=User) +def post_user_create(sender, instance, created, **kwargs): + # create a Channel object upon user creation, name it default + if created: + new = Channel.objects.create(title="default", user=instance) + new.save() + if settings.ADMINS_NOTIFICATIONS.get("NEW_USER", False): + title = "[{}] - New user just registered".format(settings.PORTAL_NAME) + msg = """ +User has just registered with email %s\n +Visit user profile page at %s + """ % ( + instance.email, + settings.SSL_FRONTEND_HOST + instance.get_absolute_url(), + ) + email = EmailMessage( + title, msg, settings.DEFAULT_FROM_EMAIL, settings.ADMIN_EMAIL_LIST + ) + email.send(fail_silently=True) + + +NOTIFICATION_METHODS = (("email", "Email"),) + + +class Notification(models.Model): + """User specific notifications + To be exposed on user profile + Needs work + """ + + user = models.ForeignKey( + User, on_delete=models.CASCADE, db_index=True, related_name="notifications" + ) + action = models.CharField(max_length=30, blank=True) + notify = models.BooleanField(default=False) + method = models.CharField( + max_length=20, choices=NOTIFICATION_METHODS, default="email" + ) + + def save(self, *args, **kwargs): + super(Notification, self).save(*args, **kwargs) + + def __str__(self): + return self.user.username + + +@receiver(post_delete, sender=User) +def delete_content(sender, instance, **kwargs): + """Delete user related content + Upon user deletion + """ + + Media.objects.filter(user=instance).delete() + Tag.objects.filter(user=instance).delete() + Category.objects.filter(user=instance).delete() diff --git a/users/serializers.py b/users/serializers.py new file mode 100644 index 0000000..a26d77b --- /dev/null +++ b/users/serializers.py @@ -0,0 +1,82 @@ +from rest_framework import serializers +from .models import User + + +class UserSerializer(serializers.ModelSerializer): + url = serializers.SerializerMethodField() + api_url = serializers.SerializerMethodField() + thumbnail_url = serializers.SerializerMethodField() + + def get_url(self, obj): + return self.context["request"].build_absolute_uri(obj.get_absolute_url()) + + def get_api_url(self, obj): + return self.context["request"].build_absolute_uri( + obj.get_absolute_url(api=True) + ) + + def get_thumbnail_url(self, obj): + return self.context["request"].build_absolute_uri(obj.thumbnail_url()) + + class Meta: + model = User + read_only_fields = ( + "date_added", + "is_featured", + "uid", + "username", + "advancedUser", + "is_editor", + "is_manager", + "email_is_verified", + ) + fields = ( + "description", + "date_added", + "name", + "is_featured", + "thumbnail_url", + "url", + "api_url", + "username", + "advancedUser", + "is_editor", + "is_manager", + "email_is_verified", + ) + + +class UserDetailSerializer(serializers.ModelSerializer): + url = serializers.SerializerMethodField() + api_url = serializers.SerializerMethodField() + thumbnail_url = serializers.SerializerMethodField() 
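+    # url, api_url and thumbnail_url are computed per request and returned as
+    # absolute URIs by the get_* methods below.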
+ + def get_url(self, obj): + return self.context["request"].build_absolute_uri(obj.get_absolute_url()) + + def get_api_url(self, obj): + return self.context["request"].build_absolute_uri( + obj.get_absolute_url(api=True) + ) + + def get_thumbnail_url(self, obj): + return self.context["request"].build_absolute_uri(obj.thumbnail_url()) + + class Meta: + model = User + read_only_fields = ("date_added", "is_featured", "uid", "username") + fields = ( + "description", + "date_added", + "name", + "is_featured", + "thumbnail_url", + "banner_thumbnail_url", + "url", + "username", + "media_info", + "api_url", + "edit_url", + "default_channel_edit_url", + ) + extra_kwargs = {"name": {"required": False}} diff --git a/users/tests.py b/users/tests.py new file mode 100644 index 0000000..e69de29 diff --git a/users/urls.py b/users/urls.py new file mode 100644 index 0000000..f1a8322 --- /dev/null +++ b/users/urls.py @@ -0,0 +1,43 @@ +from django.conf.urls import url +from . import views + +urlpatterns = [ + url(r"^user/(?P[\w@._-]*)$", views.view_user, name="get_user"), + url( + r"^user/(?P[\w@.]*)/media$", + views.view_user_media, + name="get_user_media", + ), + url( + r"^user/(?P[\w@.]*)/playlists$", + views.view_user_playlists, + name="get_user_playlists", + ), + url( + r"^user/(?P[\w@.]*)/about$", + views.view_user_about, + name="get_user_about", + ), + url(r"^user/(?P[\w@.]*)/edit$", views.edit_user, name="edit_user"), + url( + r"^channel/(?P[\w]*)$", views.view_channel, name="view_channel" + ), + url( + r"^channel/(?P[\w]*)/edit$", + views.edit_channel, + name="edit_channel", + ), + # API VIEWS + url(r"^api/v1/users$", views.UserList.as_view(), name="api_users"), + url(r"^api/v1/users/$", views.UserList.as_view()), + url( + r"^api/v1/users/(?P[\w@._-]*)$", + views.UserDetail.as_view(), + name="api_get_user", + ), + url( + r"^api/v1/users/(?P[\w@._-]*)/contact", + views.contact_user, + name="api_contact_user", + ), +] diff --git a/users/validators.py b/users/validators.py new file mode 100644 index 0000000..734e8f2 --- /dev/null +++ b/users/validators.py @@ -0,0 +1,18 @@ +import re + +from django.core import validators +from django.utils.deconstruct import deconstructible +from django.utils.translation import gettext_lazy as _ + + +@deconstructible +class ASCIIUsernameValidator(validators.RegexValidator): + regex = r"^[\w]+$" + message = _( + "Enter a valid username. 
This value may contain only " + "English letters and numbers" + ) + flags = re.ASCII + + +custom_username_validators = [ASCIIUsernameValidator()] diff --git a/users/views.py b/users/views.py new file mode 100644 index 0000000..50f2da8 --- /dev/null +++ b/users/views.py @@ -0,0 +1,296 @@ +from django.shortcuts import render +from django.http import HttpResponseRedirect +from django.contrib.auth.decorators import login_required +from django.core.mail import EmailMessage +from django.conf import settings + +from rest_framework import permissions +from rest_framework.views import APIView +from rest_framework.response import Response +from rest_framework.settings import api_settings +from rest_framework.exceptions import PermissionDenied +from rest_framework import status +from rest_framework.parsers import ( + JSONParser, + MultiPartParser, + FileUploadParser, + FormParser, +) +from rest_framework.decorators import api_view +from cms.permissions import IsUserOrManager +from files.methods import is_mediacms_manager, is_mediacms_editor +from .models import User, Channel +from .forms import UserForm, ChannelForm +from .serializers import UserSerializer, UserDetailSerializer + + +def get_user(username): + try: + user = User.objects.get(username=username) + return user + except User.DoesNotExist: + return None + + +def view_user(request, username): + context = {} + user = get_user(username=username) + if not user: + return HttpResponseRedirect("/members") + context["user"] = user + context["CAN_EDIT"] = ( + True + if ((user and user == request.user) or is_mediacms_manager(request.user)) + else False + ) + context["CAN_DELETE"] = True if is_mediacms_manager(request.user) else False + context["SHOW_CONTACT_FORM"] = ( + True if (user.allow_contact or is_mediacms_editor(request.user)) else False + ) + return render(request, "cms/user.html", context) + + +def view_user_media(request, username): + context = {} + user = get_user(username=username) + if not user: + return HttpResponseRedirect("/members") + + context["user"] = user + context["CAN_EDIT"] = ( + True + if ((user and user == request.user) or request.user.is_superuser) + else False + ) + context["CAN_DELETE"] = True if request.user.is_superuser else False + context["SHOW_CONTACT_FORM"] = ( + True if (user.allow_contact or is_mediacms_editor(request.user)) else False + ) + return render(request, "cms/user_media.html", context) + + +def view_user_playlists(request, username): + context = {} + user = get_user(username=username) + if not user: + return HttpResponseRedirect("/members") + + context["user"] = user + context["CAN_EDIT"] = ( + True + if ((user and user == request.user) or request.user.is_superuser) + else False + ) + context["CAN_DELETE"] = True if request.user.is_superuser else False + context["SHOW_CONTACT_FORM"] = ( + True if (user.allow_contact or is_mediacms_editor(request.user)) else False + ) + + return render(request, "cms/user_playlists.html", context) + + +def view_user_about(request, username): + context = {} + user = get_user(username=username) + if not user: + return HttpResponseRedirect("/members") + + context["user"] = user + context["CAN_EDIT"] = ( + True + if ((user and user == request.user) or request.user.is_superuser) + else False + ) + context["CAN_DELETE"] = True if request.user.is_superuser else False + context["SHOW_CONTACT_FORM"] = ( + True if (user.allow_contact or is_mediacms_editor(request.user)) else False + ) + + return render(request, "cms/user_about.html", context) + + +@login_required +def 
edit_user(request, username): + user = get_user(username=username) + if not user or (user != request.user and not is_mediacms_manager(request.user)): + return HttpResponseRedirect("/") + + if request.method == "POST": + form = UserForm(request.user, request.POST, request.FILES, instance=user) + if form.is_valid(): + user = form.save(commit=False) + user.save() + return HttpResponseRedirect(user.get_absolute_url()) + else: + form = UserForm(request.user, instance=user) + return render(request, "cms/user_edit.html", {"form": form}) + + +def view_channel(request, friendly_token): + context = {} + channel = Channel.objects.filter(friendly_token=friendly_token).first() + if not channel: + user = None + else: + user = channel.user + context["user"] = user + context["CAN_EDIT"] = ( + True + if ((user and user == request.user) or request.user.is_superuser) + else False + ) + return render(request, "cms/channel.html", context) + + +@login_required +def edit_channel(request, friendly_token): + channel = Channel.objects.filter(friendly_token=friendly_token).first() + if not ( + channel and request.user.is_authenticated and (request.user == channel.user) + ): + return HttpResponseRedirect("/") + + if request.method == "POST": + form = ChannelForm(request.POST, request.FILES, instance=channel) + if form.is_valid(): + channel = form.save(commit=False) + channel.save() + return HttpResponseRedirect(request.user.get_absolute_url()) + else: + form = ChannelForm(instance=channel) + return render(request, "cms/channel_edit.html", {"form": form}) + + +@api_view(["POST"]) +def contact_user(request, username): + if not request.user.is_authenticated: + return Response( + {"detail": "request need be authenticated"}, + status=status.HTTP_401_UNAUTHORIZED, + ) + user = User.objects.filter(username=username).first() + if user and (user.allow_contact or is_mediacms_editor(request.user)): + subject = request.data.get("subject") + from_email = request.user.email + subject = f"[{settings.PORTAL_NAME}] - Message from {from_email}" + body = request.data.get("body") + body = """ +You have received a message through the contact form\n +Sender name: %s +Sender email: %s\n +\n %s +""" % ( + request.user.name, + from_email, + body, + ) + email = EmailMessage( + subject, + body, + settings.DEFAULT_FROM_EMAIL, + [user.email], + reply_to=[from_email], + ) + email.send(fail_silently=True) + + return Response(status=status.HTTP_204_NO_CONTENT) + + +class UserList(APIView): + permission_classes = (permissions.IsAuthenticatedOrReadOnly,) + parser_classes = (JSONParser, MultiPartParser, FormParser, FileUploadParser) + + def get(self, request, format=None): + pagination_class = api_settings.DEFAULT_PAGINATION_CLASS + paginator = pagination_class() + users = User.objects.filter() + location = request.GET.get("location", "").strip() + if location: + users = users.filter(location=location) + + page = paginator.paginate_queryset(users, request) + + serializer = UserSerializer(page, many=True, context={"request": request}) + return paginator.get_paginated_response(serializer.data) + + +class UserDetail(APIView): + """""" + + permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsUserOrManager) + parser_classes = (JSONParser, MultiPartParser, FormParser, FileUploadParser) + + def get_user(self, username): + try: + user = User.objects.get(username=username) + # this need be explicitly called, and will call + # has_object_permission() after has_permission has succeeded + self.check_object_permissions(self.request, user) + return user + 
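+        # Failed lookups and permission errors are returned as DRF Response
+        # objects; callers check with isinstance(user, Response) and return
+        # them directly.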
except PermissionDenied: + return Response( + {"detail": "not enough permissions"}, status=status.HTTP_400_BAD_REQUEST + ) + except User.DoesNotExist: + return Response( + {"detail": "user does not exist"}, status=status.HTTP_400_BAD_REQUEST + ) + + def get(self, request, username, format=None): + # Get user details + user = self.get_user(username) + if isinstance(user, Response): + return user + + serializer = UserDetailSerializer(user, context={"request": request}) + return Response(serializer.data) + + def post(self, request, uid, format=None): + # USER + user = self.get_user(uid) + if isinstance(user, Response): + return user + + serializer = UserDetailSerializer( + user, data=request.data, context={"request": request} + ) + if serializer.is_valid(): + logo = request.data.get("logo") + if logo: + serializer.save(logo=logo) + else: + serializer.save() + + return Response(serializer.data, status=status.HTTP_201_CREATED) + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + def put(self, request, uid, format=None): + # ADMIN + user = self.get_user(uid) + if isinstance(user, Response): + return user + + if not request.user.is_superuser: + return Response( + {"detail": "not allowed"}, status=status.HTTP_400_BAD_REQUEST + ) + + action = request.data.get("action") + if action == "feature": + user.is_featured = True + user.save() + elif action == "unfeature": + user.is_featured = False + user.save() + + serializer = UserDetailSerializer(user, context={"request": request}) + return Response(serializer.data) + + def delete(self, request, username, format=None): + # Delete a user + user = self.get_user(username) + if isinstance(user, Response): + return user + + user.delete() + return Response(status=status.HTTP_204_NO_CONTENT) diff --git a/uwsgi.ini b/uwsgi.ini new file mode 100644 index 0000000..63f692e --- /dev/null +++ b/uwsgi.ini @@ -0,0 +1,27 @@ +[uwsgi] + +chdir = /home/mediacms.io/mediacms/ +virtualenv = /home/mediacms.io +module = cms.wsgi + +uid=www-data +gid=www-data + +processes = 2 +threads = 2 + +master = true + +socket = 127.0.0.1:9000 +#socket = /home/mediacms.io/mediacms/deploy/uwsgi.sock + + +workers = 2 + + +vacuum = true + +logto = /home/mediacms.io/mediacms/logs/errorlog.txt + +disable-logging = true +
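Note: the uploader module in this commit reads several values from Django settings (FILE_STORAGE, UPLOAD_DIR, CHUNKS_DIR, MEDIA_ROOT, CONCURRENT_UPLOADS, CHUNKS_DONE_PARAM_NAME). A minimal sketch of what such a configuration could look like follows; the concrete values are illustrative assumptions, not necessarily the project's actual settings.

    # hypothetical excerpt from a Django settings module (values are assumptions)
    MEDIA_ROOT = "/home/mediacms.io/mediacms/media_files/"   # assumed path
    FILE_STORAGE = "django.core.files.storage.FileSystemStorage"
    UPLOAD_DIR = "uploads/"          # where finished uploads are written
    CHUNKS_DIR = "chunks/"           # where in-progress chunks are written
    CONCURRENT_UPLOADS = True        # passed to ChunkedFineUploader
    CHUNKS_DONE_PARAM_NAME = "done"  # GET parameter checked by FineUploaderView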